diff --git a/Package.swift b/Package.swift index e3ae39bda0..1a28163df7 100644 --- a/Package.swift +++ b/Package.swift @@ -210,8 +210,6 @@ let package = Package( .library(name: "SotoInternetMonitor", targets: ["SotoInternetMonitor"]), .library(name: "SotoInvoicing", targets: ["SotoInvoicing"]), .library(name: "SotoIoT", targets: ["SotoIoT"]), - .library(name: "SotoIoT1ClickDevicesService", targets: ["SotoIoT1ClickDevicesService"]), - .library(name: "SotoIoT1ClickProjects", targets: ["SotoIoT1ClickProjects"]), .library(name: "SotoIoTAnalytics", targets: ["SotoIoTAnalytics"]), .library(name: "SotoIoTDataPlane", targets: ["SotoIoTDataPlane"]), .library(name: "SotoIoTDeviceAdvisor", targets: ["SotoIoTDeviceAdvisor"]), @@ -1529,18 +1527,6 @@ let package = Package( path: "./Sources/Soto/Services/IoT", swiftSettings: swiftSettings ), - .target( - name: "SotoIoT1ClickDevicesService", - dependencies: [.product(name: "SotoCore", package: "soto-core")], - path: "./Sources/Soto/Services/IoT1ClickDevicesService", - swiftSettings: swiftSettings - ), - .target( - name: "SotoIoT1ClickProjects", - dependencies: [.product(name: "SotoCore", package: "soto-core")], - path: "./Sources/Soto/Services/IoT1ClickProjects", - swiftSettings: swiftSettings - ), .target( name: "SotoIoTAnalytics", dependencies: [.product(name: "SotoCore", package: "soto-core")], diff --git a/Sources/Soto/Services/APIGateway/APIGateway_api.swift b/Sources/Soto/Services/APIGateway/APIGateway_api.swift index f4e76cc742..737b8f28b4 100644 --- a/Sources/Soto/Services/APIGateway/APIGateway_api.swift +++ b/Sources/Soto/Services/APIGateway/APIGateway_api.swift @@ -253,7 +253,7 @@ public struct APIGateway: AWSService { /// Parameters: /// - basePath: The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Specify '(none)' if you do not want callers to specify a base path name after the domain name. /// - domainName: The domain name of the BasePathMapping resource to create. - /// - domainNameId: The identifier for the domain name resource. Supported only for private custom domain names. + /// - domainNameId: The identifier for the domain name resource. Required for private custom domain names. /// - restApiId: The string identifier of the associated RestApi. /// - stage: The name of the API's stage that you want to use for this mapping. Specify '(none)' if you want callers to explicitly specify the stage name after any base path name. /// - logger: Logger use during operation @@ -2193,7 +2193,7 @@ public struct APIGateway: AWSService { /// /// Parameters: /// - domainName: The name of the DomainName resource. - /// - domainNameId: The identifier for the domain name resource. Supported only for private custom domain names. + /// - domainNameId: The identifier for the domain name resource. Required for private custom domain names. /// - logger: Logger use during operation @inlinable public func getDomainName( diff --git a/Sources/Soto/Services/APIGateway/APIGateway_shapes.swift b/Sources/Soto/Services/APIGateway/APIGateway_shapes.swift index 3b75c2eeb3..e6bb784911 100644 --- a/Sources/Soto/Services/APIGateway/APIGateway_shapes.swift +++ b/Sources/Soto/Services/APIGateway/APIGateway_shapes.swift @@ -659,7 +659,7 @@ extension APIGateway { public let basePath: String? /// The domain name of the BasePathMapping resource to create. public let domainName: String - /// The identifier for the domain name resource. 
Supported only for private custom domain names. + /// The identifier for the domain name resource. Required for private custom domain names. public let domainNameId: String? /// The string identifier of the associated RestApi. public let restApiId: String @@ -2628,7 +2628,7 @@ extension APIGateway { public struct GetDomainNameRequest: AWSEncodableShape { /// The name of the DomainName resource. public let domainName: String - /// The identifier for the domain name resource. Supported only for private custom domain names. + /// The identifier for the domain name resource. Required for private custom domain names. public let domainNameId: String? @inlinable diff --git a/Sources/Soto/Services/Amplify/Amplify_shapes.swift b/Sources/Soto/Services/Amplify/Amplify_shapes.swift index 3532c02c8c..463a8fe785 100644 --- a/Sources/Soto/Services/Amplify/Amplify_shapes.swift +++ b/Sources/Soto/Services/Amplify/Amplify_shapes.swift @@ -55,6 +55,7 @@ extension Amplify { public enum JobStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case cancelled = "CANCELLED" case cancelling = "CANCELLING" + case created = "CREATED" case failed = "FAILED" case pending = "PENDING" case provisioning = "PROVISIONING" @@ -111,6 +112,15 @@ extension Amplify { public var description: String { return self.rawValue } } + public enum WafStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case associating = "ASSOCIATING" + case associationFailed = "ASSOCIATION_FAILED" + case associationSuccess = "ASSOCIATION_SUCCESS" + case disassociating = "DISASSOCIATING" + case disassociationFailed = "DISASSOCIATION_FAILED" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct App: AWSDecodableShape { @@ -128,7 +138,7 @@ extension Amplify { public let buildSpec: String? /// The cache configuration for the Amplify app. If you don't specify the cache configuration type, Amplify uses the default AMPLIFY_MANAGED setting. public let cacheConfig: CacheConfig? - /// Creates a date and time for the Amplify app. + /// A timestamp of when Amplify created the application. public let createTime: Date /// Describes the custom HTTP headers for the Amplify app. public let customHeaders: String? @@ -162,11 +172,15 @@ extension Amplify { public let repositoryCloneMethod: RepositoryCloneMethod? /// The tag for the Amplify app. public let tags: [String: String]? - /// Updates the date and time for the Amplify app. + /// A timestamp of when Amplify updated the application. public let updateTime: Date + /// Describes the Firewall configuration for the Amplify app. Firewall support enables you to protect your hosted applications with a direct integration with WAF. + public let wafConfiguration: WafConfiguration? + /// A timestamp of when Amplify created the webhook in your Git repository. + public let webhookCreateTime: Date? @inlinable - public init(appArn: String, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, cacheConfig: CacheConfig? = nil, createTime: Date, customHeaders: String? = nil, customRules: [CustomRule]? = nil, defaultDomain: String, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool, enableBranchAutoBuild: Bool, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? 
= nil, name: String, platform: Platform, productionBranch: ProductionBranch? = nil, repository: String? = nil, repositoryCloneMethod: RepositoryCloneMethod? = nil, tags: [String: String]? = nil, updateTime: Date) { + public init(appArn: String, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, cacheConfig: CacheConfig? = nil, createTime: Date, customHeaders: String? = nil, customRules: [CustomRule]? = nil, defaultDomain: String, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool, enableBranchAutoBuild: Bool, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String, platform: Platform, productionBranch: ProductionBranch? = nil, repository: String? = nil, repositoryCloneMethod: RepositoryCloneMethod? = nil, tags: [String: String]? = nil, updateTime: Date, wafConfiguration: WafConfiguration? = nil, webhookCreateTime: Date? = nil) { self.appArn = appArn self.appId = appId self.autoBranchCreationConfig = autoBranchCreationConfig @@ -192,6 +206,8 @@ extension Amplify { self.repositoryCloneMethod = repositoryCloneMethod self.tags = tags self.updateTime = updateTime + self.wafConfiguration = wafConfiguration + self.webhookCreateTime = webhookCreateTime } private enum CodingKeys: String, CodingKey { @@ -220,6 +236,8 @@ extension Amplify { case repositoryCloneMethod = "repositoryCloneMethod" case tags = "tags" case updateTime = "updateTime" + case wafConfiguration = "wafConfiguration" + case webhookCreateTime = "webhookCreateTime" } } @@ -379,7 +397,7 @@ extension Amplify { public let branchName: String /// The build specification (build spec) content for the branch of an Amplify app. public let buildSpec: String? - /// The creation date and time for a branch that is part of an Amplify app. + /// A timestamp of when Amplify created the branch. public let createTime: Date /// The custom domains for a branch of an Amplify app. public let customDomains: [String] @@ -417,7 +435,7 @@ extension Amplify { public let totalNumberOfJobs: String /// The content Time to Live (TTL) for the website in seconds. public let ttl: String - /// The last updated date and time for a branch that is part of an Amplify app. + /// A timestamp for the last updated time for a branch. public let updateTime: Date @inlinable @@ -1850,7 +1868,7 @@ extension Amplify { public let commitId: String /// The commit message from a third-party repository provider for the job. public let commitMessage: String - /// The commit date and time for the job. + /// The commit date and time for the job. public let commitTime: Date /// The end date and time for the job. public let endTime: Date? @@ -3196,14 +3214,36 @@ extension Amplify { } } + public struct WafConfiguration: AWSDecodableShape { + /// The reason for the current status of the Firewall configuration. + public let statusReason: String? + /// The status of the process to associate or disassociate a web ACL to an Amplify app. + public let wafStatus: WafStatus? + /// The Amazon Resource Name (ARN) for the web ACL associated with an Amplify app. + public let webAclArn: String? + + @inlinable + public init(statusReason: String? = nil, wafStatus: WafStatus? = nil, webAclArn: String? 
= nil) { + self.statusReason = statusReason + self.wafStatus = wafStatus + self.webAclArn = webAclArn + } + + private enum CodingKeys: String, CodingKey { + case statusReason = "statusReason" + case wafStatus = "wafStatus" + case webAclArn = "webAclArn" + } + } + public struct Webhook: AWSDecodableShape { /// The name for a branch that is part of an Amplify app. public let branchName: String - /// The create date and time for a webhook. + /// A timestamp of when Amplify created the webhook in your Git repository. public let createTime: Date /// The description for a webhook. public let description: String - /// Updates the date and time for a webhook. + /// A timestamp of when Amplify updated the webhook in your Git repository. public let updateTime: Date /// The Amazon Resource Name (ARN) for the webhook. public let webhookArn: String diff --git a/Sources/Soto/Services/AppStream/AppStream_shapes.swift b/Sources/Soto/Services/AppStream/AppStream_shapes.swift index 437fb8b0ac..cb33f432d6 100644 --- a/Sources/Soto/Services/AppStream/AppStream_shapes.swift +++ b/Sources/Soto/Services/AppStream/AppStream_shapes.swift @@ -239,6 +239,7 @@ extension AppStream { public enum PlatformType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case amazonLinux2 = "AMAZON_LINUX2" case rhel8 = "RHEL8" + case rockyLinux8 = "ROCKY_LINUX8" case windows = "WINDOWS" case windowsServer2016 = "WINDOWS_SERVER_2016" case windowsServer2019 = "WINDOWS_SERVER_2019" diff --git a/Sources/Soto/Services/AppSync/AppSync_api.swift b/Sources/Soto/Services/AppSync/AppSync_api.swift index 35c8154d0c..05a500111d 100644 --- a/Sources/Soto/Services/AppSync/AppSync_api.swift +++ b/Sources/Soto/Services/AppSync/AppSync_api.swift @@ -2671,7 +2671,7 @@ public struct AppSync: AWSService { public func updateGraphqlApi( additionalAuthenticationProviders: [AdditionalAuthenticationProvider]? = nil, apiId: String, - authenticationType: AuthenticationType? = nil, + authenticationType: AuthenticationType, enhancedMetricsConfig: EnhancedMetricsConfig? = nil, introspectionConfig: GraphQLApiIntrospectionConfig? = nil, lambdaAuthorizerConfig: LambdaAuthorizerConfig? = nil, diff --git a/Sources/Soto/Services/AppSync/AppSync_shapes.swift b/Sources/Soto/Services/AppSync/AppSync_shapes.swift index 23af3d19f1..1b081d16c9 100644 --- a/Sources/Soto/Services/AppSync/AppSync_shapes.swift +++ b/Sources/Soto/Services/AppSync/AppSync_shapes.swift @@ -5093,7 +5093,7 @@ extension AppSync { /// The API ID. public let apiId: String /// The new authentication type for the GraphqlApi object. - public let authenticationType: AuthenticationType? + public let authenticationType: AuthenticationType /// The enhancedMetricsConfig object. public let enhancedMetricsConfig: EnhancedMetricsConfig? /// Sets the value of the GraphQL API to enable (ENABLED) or disable (DISABLED) introspection. If no value is provided, the introspection configuration will be set to ENABLED by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled. For more information about introspection, see GraphQL introspection. @@ -5120,7 +5120,7 @@ extension AppSync { public let xrayEnabled: Bool? @inlinable - public init(additionalAuthenticationProviders: [AdditionalAuthenticationProvider]? = nil, apiId: String, authenticationType: AuthenticationType? = nil, enhancedMetricsConfig: EnhancedMetricsConfig? = nil, introspectionConfig: GraphQLApiIntrospectionConfig? 
= nil, lambdaAuthorizerConfig: LambdaAuthorizerConfig? = nil, logConfig: LogConfig? = nil, mergedApiExecutionRoleArn: String? = nil, name: String, openIDConnectConfig: OpenIDConnectConfig? = nil, ownerContact: String? = nil, queryDepthLimit: Int? = nil, resolverCountLimit: Int? = nil, userPoolConfig: UserPoolConfig? = nil, xrayEnabled: Bool? = nil) { + public init(additionalAuthenticationProviders: [AdditionalAuthenticationProvider]? = nil, apiId: String, authenticationType: AuthenticationType, enhancedMetricsConfig: EnhancedMetricsConfig? = nil, introspectionConfig: GraphQLApiIntrospectionConfig? = nil, lambdaAuthorizerConfig: LambdaAuthorizerConfig? = nil, logConfig: LogConfig? = nil, mergedApiExecutionRoleArn: String? = nil, name: String, openIDConnectConfig: OpenIDConnectConfig? = nil, ownerContact: String? = nil, queryDepthLimit: Int? = nil, resolverCountLimit: Int? = nil, userPoolConfig: UserPoolConfig? = nil, xrayEnabled: Bool? = nil) { self.additionalAuthenticationProviders = additionalAuthenticationProviders self.apiId = apiId self.authenticationType = authenticationType @@ -5143,7 +5143,7 @@ extension AppSync { var container = encoder.container(keyedBy: CodingKeys.self) try container.encodeIfPresent(self.additionalAuthenticationProviders, forKey: .additionalAuthenticationProviders) request.encodePath(self.apiId, key: "apiId") - try container.encodeIfPresent(self.authenticationType, forKey: .authenticationType) + try container.encode(self.authenticationType, forKey: .authenticationType) try container.encodeIfPresent(self.enhancedMetricsConfig, forKey: .enhancedMetricsConfig) try container.encodeIfPresent(self.introspectionConfig, forKey: .introspectionConfig) try container.encodeIfPresent(self.lambdaAuthorizerConfig, forKey: .lambdaAuthorizerConfig) diff --git a/Sources/Soto/Services/Batch/Batch_api.swift b/Sources/Soto/Services/Batch/Batch_api.swift index b80e792f59..70ada99516 100644 --- a/Sources/Soto/Services/Batch/Batch_api.swift +++ b/Sources/Soto/Services/Batch/Batch_api.swift @@ -93,6 +93,7 @@ public struct Batch: AWSService { "ap-southeast-3": "fips.batch.ap-southeast-3.amazonaws.com", "ap-southeast-4": "fips.batch.ap-southeast-4.amazonaws.com", "ap-southeast-5": "fips.batch.ap-southeast-5.amazonaws.com", + "ap-southeast-7": "fips.batch.ap-southeast-7.amazonaws.com", "ca-central-1": "fips.batch.ca-central-1.amazonaws.com", "ca-west-1": "fips.batch.ca-west-1.amazonaws.com", "eu-central-1": "fips.batch.eu-central-1.amazonaws.com", @@ -106,6 +107,7 @@ public struct Batch: AWSService { "il-central-1": "fips.batch.il-central-1.amazonaws.com", "me-central-1": "fips.batch.me-central-1.amazonaws.com", "me-south-1": "fips.batch.me-south-1.amazonaws.com", + "mx-central-1": "fips.batch.mx-central-1.amazonaws.com", "sa-east-1": "fips.batch.sa-east-1.amazonaws.com", "us-east-1": "fips.batch.us-east-1.amazonaws.com", "us-east-2": "fips.batch.us-east-2.amazonaws.com", diff --git a/Sources/Soto/Services/Batch/Batch_shapes.swift b/Sources/Soto/Services/Batch/Batch_shapes.swift index 2e3d42d7ba..0117ab5f20 100644 --- a/Sources/Soto/Services/Batch/Batch_shapes.swift +++ b/Sources/Soto/Services/Batch/Batch_shapes.swift @@ -2378,7 +2378,7 @@ extension Batch { public struct FairsharePolicy: AWSEncodableShape & AWSDecodableShape { /// A value used to reserve some of the available maximum vCPU for fair share identifiers that aren't already used. The reserved ratio is (computeReservation/100)^ActiveFairShares where ActiveFairShares is the number of active fair share identifiers. 
For example, a computeReservation value of 50 indicates that Batch reserves 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if there are two fair share identifiers. It reserves 12.5% if there are three fair share identifiers. A computeReservation value of 25 indicates that Batch should reserve 25% of the maximum available vCPU if there's only one fair share identifier, 6.25% if there are two fair share identifiers, and 1.56% if there are three fair share identifiers. The minimum value is 0 and the maximum value is 99. public let computeReservation: Int? - /// The amount of time (in seconds) to use to calculate a fair share percentage for each fair share identifier in use. A value of zero (0) indicates that only current usage is measured. The decay allows for more recently run jobs to have more weight than jobs that ran earlier. The maximum supported value is 604800 (1 week). + /// The amount of time (in seconds) to use to calculate a fair share percentage for each fair share identifier in use. A value of zero (0) indicates the default minimum time window (600 seconds). The maximum supported value is 604800 (1 week). The decay allows for more recently run jobs to have more weight than jobs that ran earlier. Consider adjusting this number if you have jobs that (on average) run longer than ten minutes, or a large difference in job count or job run times between share identifiers, and the allocation of resources doesn’t meet your needs. public let shareDecaySeconds: Int? /// An array of SharedIdentifier objects that contain the weights for the fair share identifiers for the fair share policy. Fair share identifiers that aren't included have a default weight of 1.0. public let shareDistribution: [ShareAttributes]? @@ -2734,7 +2734,7 @@ extension Batch { public let jobQueueName: String? /// The set of actions that Batch perform on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed. public let jobStateTimeLimitActions: [JobStateTimeLimitAction]? - /// The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either Amazon EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). Amazon EC2 and Fargate compute environments can't be mixed. + /// The priority of the job queue. Job queue priority determines the order that job queues are evaluated when multiple queues dispatch jobs within a shared compute environment. A higher value for priority indicates a higher priority. Queues are evaluated in cycles, in descending order by priority. For example, a job queue with a priority value of 10 is evaluated before a queue with a priority value of 1. All of the compute environments must be either Amazon EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). Amazon EC2 and Fargate compute environments can't be mixed. Job queue priority doesn't guarantee that a particular job executes before a job in a lower priority queue. Jobs added to higher priority queues during the queue evaluation cycle might not be evaluated until the next cycle. 
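A quick sanity check of the reservation formula documented above, as standalone Swift. The service applies this internally; the helper below only reproduces the documented arithmetic:

    import Foundation

    /// Reserved vCPU fraction per the documented formula:
    /// (computeReservation / 100)^activeFairShares
    func reservedFraction(computeReservation: Int, activeFairShares: Int) -> Double {
        pow(Double(computeReservation) / 100.0, Double(activeFairShares))
    }

    // Matches the documented examples for computeReservation = 50:
    // 1 share -> 0.5, 2 shares -> 0.25, 3 shares -> 0.125
    for shares in 1...3 {
        print(reservedFraction(computeReservation: 50, activeFairShares: shares))
    }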
A job is dispatched from a queue only if resources are available when the queue is evaluated. If there are insufficient resources available at that time, the cycle proceeds to the next queue. This means that jobs added to higher priority queues might have to wait for jobs in multiple lower priority queues to complete before they are dispatched. You can use job dependencies to control the order for jobs from queues with different priorities. For more information, see Job Dependencies in the Batch User Guide. public let priority: Int? /// The Amazon Resource Name (ARN) of the scheduling policy. The format is aws:Partition:batch:Region:Account:scheduling-policy/Name . For example, aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy. public let schedulingPolicyArn: String? @@ -2940,7 +2940,7 @@ extension Batch { public let launchTemplateId: String? /// The name of the launch template. Note: If you specify the launchTemplateName you can't specify the launchTemplateId as well. public let launchTemplateName: String? - /// The instance type or family that this this override launch template should be applied to. This parameter is required when defining a launch template override. Information included in this parameter must meet the following requirements: Must be a valid Amazon EC2 instance type or family. optimal isn't allowed. targetInstanceTypes can target only instance types and families that are included within the ComputeResource.instanceTypes set. targetInstanceTypes doesn't need to include all of the instances from the instanceType set, but at least a subset. For example, if ComputeResource.instanceTypes includes [m5, g5], targetInstanceTypes can include [m5.2xlarge] and [m5.large] but not [c5.large]. targetInstanceTypes included within the same launch template override or across launch template overrides can't overlap for the same compute environment. For example, you can't define one launch template override to target an instance family and another define an instance type within this same family. + /// The instance type or family that this override launch template should be applied to. This parameter is required when defining a launch template override. Information included in this parameter must meet the following requirements: Must be a valid Amazon EC2 instance type or family. optimal isn't allowed. targetInstanceTypes can target only instance types and families that are included within the ComputeResource.instanceTypes set. targetInstanceTypes doesn't need to include all of the instances from the instanceType set, but at least a subset. For example, if ComputeResource.instanceTypes includes [m5, g5], targetInstanceTypes can include [m5.2xlarge] and [m5.large] but not [c5.large]. targetInstanceTypes included within the same launch template override or across launch template overrides can't overlap for the same compute environment. For example, you can't define one launch template override to target an instance family and another define an instance type within this same family. public let targetInstanceTypes: [String]? /// The version number of the launch template, $Default, or $Latest. If the value is $Default, the default version of the launch template is used. If the value is $Latest, the latest version of the launch template is used. If the AMI ID that's used in a compute environment is from the launch template, the AMI isn't changed when the compute environment is updated. 
It's only changed if the updateToLatestImageVersion parameter for the compute environment is set to true. During an infrastructure update, if either $Default or $Latest is specified, Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. Default: $Default Latest: $Latest public let version: String? @@ -3886,7 +3886,7 @@ extension Batch { public let command: [String]? /// A list of containers that this container depends on. public let dependsOn: [TaskContainerDependency]? - /// The environment variables to pass to a container. This parameter maps to Env inthe Create a container section of the Docker Remote API and the --env parameter to docker run. We don't recommend using plaintext environment variables for sensitive information, such as credential data. Environment variables cannot start with AWS_BATCH. This naming convention is reserved for variables that Batch sets. + /// The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env parameter to docker run. We don't recommend using plaintext environment variables for sensitive information, such as credential data. Environment variables cannot start with AWS_BATCH. This naming convention is reserved for variables that Batch sets. public let environment: [KeyValuePair]? /// If the essential parameter of a container is marked as true, and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the essential parameter of a container is marked as false, its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential. All jobs must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see Application Architecture in the Amazon Elastic Container Service Developer Guide. public let essential: Bool? diff --git a/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift b/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift index 93afed3006..535f8fac3e 100644 --- a/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift +++ b/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift @@ -250,6 +250,12 @@ extension Bedrock { public var description: String { return self.rawValue } } + public enum PerformanceConfigLatency: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case optimized = "optimized" + case standard = "standard" + public var description: String { return self.rawValue } + } + public enum PromptRouterStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case available = "AVAILABLE" public var description: String { return self.rawValue } @@ -1976,11 +1982,14 @@ extension Bedrock { public let inferenceParams: String? /// The ARN of the Amazon Bedrock model or inference profile specified. public let modelIdentifier: String + /// Specifies performance settings for the model or inference profile. + public let performanceConfig: PerformanceConfiguration? 
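A minimal sketch of the performance settings added in this file. PerformanceConfiguration(latency:) and the PerformanceConfigLatency cases are taken from this diff; the name of the owning request shape is elided by the hunk, so it is left abstract here:

    import SotoBedrock

    // Opt a model or inference profile into the latency-optimized variant.
    let perf = Bedrock.PerformanceConfiguration(latency: .optimized)
    // Passed to the owning request shape via its `performanceConfig:`
    // parameter (that shape's declaration line is outside this hunk).
    _ = perf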
@inlinable - public init(inferenceParams: String? = nil, modelIdentifier: String) { + public init(inferenceParams: String? = nil, modelIdentifier: String, performanceConfig: PerformanceConfiguration? = nil) { self.inferenceParams = inferenceParams self.modelIdentifier = modelIdentifier + self.performanceConfig = performanceConfig } public func validate(name: String) throws { @@ -1994,6 +2003,7 @@ extension Bedrock { private enum CodingKeys: String, CodingKey { case inferenceParams = "inferenceParams" case modelIdentifier = "modelIdentifier" + case performanceConfig = "performanceConfig" } } @@ -5738,6 +5748,20 @@ extension Bedrock { } } + public struct PerformanceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Specifies whether to use the latency-optimized or standard version of a model or inference profile. + public let latency: PerformanceConfigLatency? + + @inlinable + public init(latency: PerformanceConfigLatency? = nil) { + self.latency = latency + } + + private enum CodingKeys: String, CodingKey { + case latency = "latency" + } + } + public struct PromptRouterSummary: AWSDecodableShape { /// When the router was created. @OptionalCustomCoding diff --git a/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift b/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift index 80b33a8871..f30d2a3cba 100644 --- a/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift +++ b/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift @@ -359,6 +359,7 @@ extension BedrockAgent { public enum PromptType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case knowledgeBaseResponseGeneration = "KNOWLEDGE_BASE_RESPONSE_GENERATION" + case memorySummarization = "MEMORY_SUMMARIZATION" case orchestration = "ORCHESTRATION" case postProcessing = "POST_PROCESSING" case preProcessing = "PRE_PROCESSING" @@ -414,6 +415,7 @@ extension BedrockAgent { public enum SharePointAuthType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case oauth2ClientCredentials = "OAUTH2_CLIENT_CREDENTIALS" + case oauth2SharepointAppOnlyClientCredentials = "OAUTH2_SHAREPOINT_APP_ONLY_CLIENT_CREDENTIALS" public var description: String { return self.rawValue } } @@ -6963,24 +6965,29 @@ extension BedrockAgent { public struct MemoryConfiguration: AWSEncodableShape & AWSDecodableShape { /// The type of memory that is stored. public let enabledMemoryTypes: [MemoryType] + /// Contains the configuration for SESSION_SUMMARY memory type enabled for the agent. + public let sessionSummaryConfiguration: SessionSummaryConfiguration? /// The number of days the agent is configured to retain the conversational context. public let storageDays: Int? @inlinable - public init(enabledMemoryTypes: [MemoryType], storageDays: Int? = nil) { + public init(enabledMemoryTypes: [MemoryType], sessionSummaryConfiguration: SessionSummaryConfiguration? = nil, storageDays: Int? 
= nil) { self.enabledMemoryTypes = enabledMemoryTypes + self.sessionSummaryConfiguration = sessionSummaryConfiguration self.storageDays = storageDays } public func validate(name: String) throws { try self.validate(self.enabledMemoryTypes, name: "enabledMemoryTypes", parent: name, max: 1) try self.validate(self.enabledMemoryTypes, name: "enabledMemoryTypes", parent: name, min: 1) - try self.validate(self.storageDays, name: "storageDays", parent: name, max: 30) + try self.sessionSummaryConfiguration?.validate(name: "\(name).sessionSummaryConfiguration") + try self.validate(self.storageDays, name: "storageDays", parent: name, max: 365) try self.validate(self.storageDays, name: "storageDays", parent: name, min: 0) } private enum CodingKeys: String, CodingKey { case enabledMemoryTypes = "enabledMemoryTypes" + case sessionSummaryConfiguration = "sessionSummaryConfiguration" case storageDays = "storageDays" } } @@ -8746,6 +8753,24 @@ extension BedrockAgent { } } + public struct SessionSummaryConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Maximum number of recent session summaries to include in the agent's prompt context. + public let maxRecentSessions: Int? + + @inlinable + public init(maxRecentSessions: Int? = nil) { + self.maxRecentSessions = maxRecentSessions + } + + public func validate(name: String) throws { + try self.validate(self.maxRecentSessions, name: "maxRecentSessions", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case maxRecentSessions = "maxRecentSessions" + } + } + public struct SharePointCrawlerConfiguration: AWSEncodableShape & AWSDecodableShape { /// The configuration of filtering the SharePoint content. For example, configuring regular expression patterns to include or exclude certain content. public let filterConfiguration: CrawlFilterConfiguration? @@ -10460,13 +10485,16 @@ extension BedrockAgent { public let inclusionFilters: [String]? /// The scope of what is crawled for your URLs. You can choose to crawl only web pages that belong to the same host or primary domain. For example, only web pages that contain the seed URL "https://docs.aws.amazon.com/bedrock/latest/userguide/" and no other domains. You can choose to include sub domains in addition to the host or primary domain. For example, web pages that contain "aws.amazon.com" can also include sub domain "docs.aws.amazon.com". public let scope: WebScopeType? + /// A string used for identifying the crawler or a bot when it accesses a web server. By default, this is set to bedrockbot_UUID for your crawler. You can optionally append a custom string to bedrockbot_UUID to allowlist a specific user agent permitted to access your source URLs. + public let userAgent: String? @inlinable - public init(crawlerLimits: WebCrawlerLimits? = nil, exclusionFilters: [String]? = nil, inclusionFilters: [String]? = nil, scope: WebScopeType? = nil) { + public init(crawlerLimits: WebCrawlerLimits? = nil, exclusionFilters: [String]? = nil, inclusionFilters: [String]? = nil, scope: WebScopeType? = nil, userAgent: String? 
= nil) { self.crawlerLimits = crawlerLimits self.exclusionFilters = exclusionFilters self.inclusionFilters = inclusionFilters self.scope = scope + self.userAgent = userAgent } public func validate(name: String) throws { @@ -10482,6 +10510,8 @@ extension BedrockAgent { } try self.validate(self.inclusionFilters, name: "inclusionFilters", parent: name, max: 25) try self.validate(self.inclusionFilters, name: "inclusionFilters", parent: name, min: 1) + try self.validate(self.userAgent, name: "userAgent", parent: name, max: 40) + try self.validate(self.userAgent, name: "userAgent", parent: name, min: 15) } private enum CodingKeys: String, CodingKey { @@ -10489,19 +10519,24 @@ extension BedrockAgent { case exclusionFilters = "exclusionFilters" case inclusionFilters = "inclusionFilters" case scope = "scope" + case userAgent = "userAgent" } } public struct WebCrawlerLimits: AWSEncodableShape & AWSDecodableShape { + /// The max number of web pages crawled from your source URLs, up to 25,000 pages. If the web pages exceed this limit, the data source sync will fail and no web pages will be ingested. + public let maxPages: Int? /// The max rate at which pages are crawled, up to 300 per minute per host. public let rateLimit: Int? @inlinable - public init(rateLimit: Int? = nil) { + public init(maxPages: Int? = nil, rateLimit: Int? = nil) { + self.maxPages = maxPages self.rateLimit = rateLimit } private enum CodingKeys: String, CodingKey { + case maxPages = "maxPages" case rateLimit = "rateLimit" } } diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift index 2b30053b2f..0690149736 100644 --- a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift @@ -99,18 +99,21 @@ public struct BedrockAgentRuntime: AWSService { /// - agentAliasId: The unique identifier of an alias of an agent. /// - agentId: The unique identifier of the agent to which the alias belongs. /// - memoryId: The unique identifier of the memory. + /// - sessionId: The unique session identifier of the memory. /// - logger: Logger use during operation @inlinable public func deleteAgentMemory( agentAliasId: String, agentId: String, memoryId: String? = nil, + sessionId: String? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> DeleteAgentMemoryResponse { let input = DeleteAgentMemoryRequest( agentAliasId: agentAliasId, agentId: agentId, - memoryId: memoryId + memoryId: memoryId, + sessionId: sessionId ) return try await self.deleteAgentMemory(input, logger: logger) } @@ -191,7 +194,7 @@ public struct BedrockAgentRuntime: AWSService { return try await self.getAgentMemory(input, logger: logger) } - /// The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent. Sends a prompt for the agent to process and respond to. Note the following fields for the request: To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement. End a conversation by setting endSession to true. 
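The new sessionId query parameter above makes per-session deletion possible. A minimal sketch, using only the deleteAgentMemory signature shown in this diff (all identifiers are placeholders):

    import SotoBedrockAgentRuntime

    // Delete the stored memory for one session instead of the whole memory.
    func forgetSession(_ runtime: BedrockAgentRuntime) async throws {
        _ = try await runtime.deleteAgentMemory(
            agentAliasId: "ALIAS_ID",        // placeholder
            agentId: "AGENT_ID",             // placeholder
            memoryId: "memory-id",           // placeholder
            sessionId: "session-to-forget"   // placeholder; new in this diff
        )
    }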
In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group. The response is returned in the bytes field of the chunk object. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field. Errors are also surfaced in the response. + /// Sends a prompt for the agent to process and respond to. Note the following fields for the request: To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement. To stream agent responses, make sure that only orchestration prompt is enabled. Agent streaming is not supported for the following steps: Pre-processing Post-processing Agent with 1 Knowledge base and User Input not enabled End a conversation by setting endSession to true. In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group. The response contains both chunk and trace attributes. The final response is returned in the bytes field of the chunk object. The InvokeAgent returns one chunk for the entire interaction. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field. Errors are also surfaced in the response. @Sendable @inlinable public func invokeAgent(_ input: InvokeAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InvokeAgentResponse { @@ -204,11 +207,12 @@ public struct BedrockAgentRuntime: AWSService { logger: logger ) } - /// The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent. Sends a prompt for the agent to process and respond to. Note the following fields for the request: To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement. End a conversation by setting endSession to true. In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group. The response is returned in the bytes field of the chunk object. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. 
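A minimal sketch of the invokeAgent convenience call described above, restricted to parameters visible in this diff (IDs are placeholders; defaulted parameters are omitted):

    import SotoBedrockAgentRuntime

    func ask(_ runtime: BedrockAgentRuntime, question: String) async throws {
        // Reusing the same sessionId continues the conversation.
        let response = try await runtime.invokeAgent(
            agentAliasId: "ALIAS_ID",   // placeholder
            agentId: "AGENT_ID",        // placeholder
            enableTrace: true,          // surfaces the reasoning trace
            inputText: question,
            sessionId: "my-session"
        )
        _ = response // chunk and trace events arrive in the response stream
    }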
If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field. Errors are also surfaced in the response. + /// Sends a prompt for the agent to process and respond to. Note the following fields for the request: To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement. To stream agent responses, make sure that only orchestration prompt is enabled. Agent streaming is not supported for the following steps: Pre-processing Post-processing Agent with 1 Knowledge base and User Input not enabled End a conversation by setting endSession to true. In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group. The response contains both chunk and trace attributes. The final response is returned in the bytes field of the chunk object. The InvokeAgent returns one chunk for the entire interaction. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field. Errors are also surfaced in the response. /// /// Parameters: /// - agentAliasId: The alias of the agent to use. /// - agentId: The unique identifier of the agent to use. + /// - bedrockModelConfigurations: Model performance settings for the request. /// - enableTrace: Specifies whether to turn on the trace or not to track the agent's reasoning process. For more information, see Trace enablement. /// - endSession: Specifies whether to end the session with the agent or not. /// - inputText: The prompt text to send the agent. If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored. @@ -216,12 +220,13 @@ public struct BedrockAgentRuntime: AWSService { /// - sessionId: The unique identifier of the session. Use the same value across requests to continue the same conversation. /// - sessionState: Contains parameters that specify various attributes of the session. For more information, see Control session context. If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored. /// - sourceArn: The ARN of the resource making the request. - /// - streamingConfigurations: Specifies the configurations for streaming. + /// - streamingConfigurations: Specifies the configurations for streaming. To use agent streaming, you need permissions to perform the bedrock:InvokeModelWithResponseStream action. /// - logger: Logger use during operation @inlinable public func invokeAgent( agentAliasId: String, agentId: String, + bedrockModelConfigurations: BedrockModelConfigurations? = nil, enableTrace: Bool? = nil, endSession: Bool? = nil, inputText: String? 
= nil, @@ -235,6 +240,7 @@ public struct BedrockAgentRuntime: AWSService { let input = InvokeAgentRequest( agentAliasId: agentAliasId, agentId: agentId, + bedrockModelConfigurations: bedrockModelConfigurations, enableTrace: enableTrace, endSession: endSession, inputText: inputText, @@ -264,28 +270,34 @@ public struct BedrockAgentRuntime: AWSService { /// /// Parameters: /// - enableTrace: Specifies whether to return the trace for the flow or not. Traces track inputs and outputs for nodes in the flow. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock. + /// - executionId: The unique identifier for the current flow execution. If you don't provide a value, Amazon Bedrock creates the identifier for you. /// - flowAliasIdentifier: The unique identifier of the flow alias. /// - flowIdentifier: The unique identifier of the flow. /// - inputs: A list of objects, each containing information about an input into the flow. + /// - modelPerformanceConfiguration: Model performance settings for the request. /// - logger: Logger use during operation @inlinable public func invokeFlow( enableTrace: Bool? = nil, + executionId: String? = nil, flowAliasIdentifier: String, flowIdentifier: String, inputs: [FlowInput], + modelPerformanceConfiguration: ModelPerformanceConfiguration? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> InvokeFlowResponse { let input = InvokeFlowRequest( enableTrace: enableTrace, + executionId: executionId, flowAliasIdentifier: flowAliasIdentifier, flowIdentifier: flowIdentifier, - inputs: inputs + inputs: inputs, + modelPerformanceConfiguration: modelPerformanceConfiguration ) return try await self.invokeFlow(input, logger: logger) } - /// Invokes an inline Amazon Bedrock agent using the configurations you provide with the request. Specify the following fields for security purposes. (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeInlineAgent request begins a new session. To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts. The agent instructions will not be honored if your agent has only one knowledge base, uses default prompts, has no action group, and user input is disabled. The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeInlineAgent. + /// Invokes an inline Amazon Bedrock agent using the configurations you provide with the request. Specify the following fields for security purposes. (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeInlineAgent request begins a new session. To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts. The agent instructions will not be honored if your agent has only one knowledge base, uses default prompts, has no action group, and user input is disabled. 
@Sendable @inlinable public func invokeInlineAgent(_ input: InvokeInlineAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InvokeInlineAgentResponse { @@ -298,10 +310,11 @@ public struct BedrockAgentRuntime: AWSService { logger: logger ) } - /// Invokes an inline Amazon Bedrock agent using the configurations you provide with the request. Specify the following fields for security purposes. (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeInlineAgent request begins a new session. To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts. The agent instructions will not be honored if your agent has only one knowledge base, uses default prompts, has no action group, and user input is disabled. The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeInlineAgent. + /// Invokes an inline Amazon Bedrock agent using the configurations you provide with the request. Specify the following fields for security purposes. (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeInlineAgent request begins a new session. To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts. The agent instructions will not be honored if your agent has only one knowledge base, uses default prompts, has no action group, and user input is disabled. /// /// Parameters: /// - actionGroups: A list of action groups with each action group defining the action the inline agent needs to carry out. + /// - bedrockModelConfigurations: Model settings for the request. /// - customerEncryptionKeyArn: The Amazon Resource Name (ARN) of the Amazon Web Services KMS key to use to encrypt your inline agent. /// - enableTrace: Specifies whether to turn on the trace or not to track the agent's reasoning process. For more information, see Using trace. /// - endSession: Specifies whether to end the session with the inline agent or not. @@ -314,10 +327,12 @@ public struct BedrockAgentRuntime: AWSService { /// - knowledgeBases: Contains information of the knowledge bases to associate with. /// - promptOverrideConfiguration: Configurations for advanced prompts used to override the default prompts to enhance the accuracy of the inline agent. /// - sessionId: The unique identifier of the session. Use the same value across requests to continue the same conversation. + /// - streamingConfigurations: Specifies the configurations for streaming. To use agent streaming, you need permissions to perform the bedrock:InvokeModelWithResponseStream action. /// - logger: Logger use during operation @inlinable public func invokeInlineAgent( actionGroups: [AgentActionGroup]? = nil, + bedrockModelConfigurations: InlineBedrockModelConfigurations? = nil, customerEncryptionKeyArn: String? = nil, enableTrace: Bool? = nil, endSession: Bool? 
= nil, @@ -330,10 +345,12 @@ public struct BedrockAgentRuntime: AWSService { knowledgeBases: [KnowledgeBase]? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, sessionId: String, + streamingConfigurations: StreamingConfigurations? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> InvokeInlineAgentResponse { let input = InvokeInlineAgentRequest( actionGroups: actionGroups, + bedrockModelConfigurations: bedrockModelConfigurations, customerEncryptionKeyArn: customerEncryptionKeyArn, enableTrace: enableTrace, endSession: endSession, @@ -345,7 +362,8 @@ public struct BedrockAgentRuntime: AWSService { instruction: instruction, knowledgeBases: knowledgeBases, promptOverrideConfiguration: promptOverrideConfiguration, - sessionId: sessionId + sessionId: sessionId, + streamingConfigurations: streamingConfigurations ) return try await self.invokeInlineAgent(input, logger: logger) } diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift index 89753a0e36..57c9fdb791 100644 --- a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift @@ -95,6 +95,7 @@ extension BedrockAgentRuntime { } public enum FlowCompletionReason: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case inputRequired = "INPUT_REQUIRED" case success = "SUCCESS" public var description: String { return self.rawValue } } @@ -245,6 +246,12 @@ extension BedrockAgentRuntime { public var description: String { return self.rawValue } } + public enum PerformanceConfigLatency: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case optimized = "optimized" + case standard = "standard" + public var description: String { return self.rawValue } + } + public enum PromptState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" @@ -451,6 +458,8 @@ extension BedrockAgentRuntime { case dependencyFailedException(DependencyFailedException) /// Contains information about why the flow completed. case flowCompletionEvent(FlowCompletionEvent) + /// The event stream containing the multi-turn input request information from the flow. + case flowMultiTurnInputRequestEvent(FlowMultiTurnInputRequestEvent) /// Contains information about an output from flow invocation. case flowOutputEvent(FlowOutputEvent) /// Contains information about a trace, which tracks an input or output for a node in the flow. 
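A sketch of consuming the new multi-turn event alongside the existing ones. The enclosing union is FlowResponseStream in Soto's generated code (an assumption here; its declaration line falls outside this hunk):

    import SotoBedrockAgentRuntime

    func handle(_ event: BedrockAgentRuntime.FlowResponseStream) {
        switch event {
        case .flowMultiTurnInputRequestEvent(let request):
            // New: the flow paused (completionReason INPUT_REQUIRED) and
            // names the node that wants more input.
            print("node \(request.nodeName) (\(request.nodeType)) requests input")
        case .flowOutputEvent(let output):
            print("output: \(output)")
        case .flowCompletionEvent(let completion):
            print("completed: \(completion)")
        default:
            break // exception and trace events elided in this sketch
        }
    }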
@@ -491,6 +500,9 @@ extension BedrockAgentRuntime { case .flowCompletionEvent: let value = try container.decode(FlowCompletionEvent.self, forKey: .flowCompletionEvent) self = .flowCompletionEvent(value) + case .flowMultiTurnInputRequestEvent: + let value = try container.decode(FlowMultiTurnInputRequestEvent.self, forKey: .flowMultiTurnInputRequestEvent) + self = .flowMultiTurnInputRequestEvent(value) case .flowOutputEvent: let value = try container.decode(FlowOutputEvent.self, forKey: .flowOutputEvent) self = .flowOutputEvent(value) @@ -521,6 +533,7 @@ extension BedrockAgentRuntime { case conflictException = "conflictException" case dependencyFailedException = "dependencyFailedException" case flowCompletionEvent = "flowCompletionEvent" + case flowMultiTurnInputRequestEvent = "flowMultiTurnInputRequestEvent" case flowOutputEvent = "flowOutputEvent" case flowTraceEvent = "flowTraceEvent" case internalServerException = "internalServerException" @@ -963,6 +976,8 @@ extension BedrockAgentRuntime { case files(FilePart) /// An internal server error occurred. Retry your request. case internalServerException(InternalServerException) + /// The model specified in the request is not ready to serve Inference requests. The AWS SDK will automatically retry the operation up to 5 times. For information about configuring automatic retries, see Retry behavior in the AWS SDKs and Tools reference guide. + case modelNotReadyException(ModelNotReadyException) /// The specified resource Amazon Resource Name (ARN) was not found. Check the Amazon Resource Name (ARN) and try your request again. case resourceNotFoundException(ResourceNotFoundException) /// Contains the parameters and information that the agent elicited from the customer to carry out an action. This information is returned to the system and can be used in your own setup for fulfilling the action. @@ -1007,6 +1022,9 @@ extension BedrockAgentRuntime { case .internalServerException: let value = try container.decode(InternalServerException.self, forKey: .internalServerException) self = .internalServerException(value) + case .modelNotReadyException: + let value = try container.decode(ModelNotReadyException.self, forKey: .modelNotReadyException) + self = .modelNotReadyException(value) case .resourceNotFoundException: let value = try container.decode(ResourceNotFoundException.self, forKey: .resourceNotFoundException) self = .resourceNotFoundException(value) @@ -1036,6 +1054,7 @@ extension BedrockAgentRuntime { case dependencyFailedException = "dependencyFailedException" case files = "files" case internalServerException = "internalServerException" + case modelNotReadyException = "modelNotReadyException" case resourceNotFoundException = "resourceNotFoundException" case returnControl = "returnControl" case serviceQuotaExceededException = "serviceQuotaExceededException" @@ -1721,6 +1740,20 @@ extension BedrockAgentRuntime { } } + public struct BedrockModelConfigurations: AWSEncodableShape { + /// The performance configuration for the model. + public let performanceConfig: PerformanceConfiguration? + + @inlinable + public init(performanceConfig: PerformanceConfiguration? = nil) { + self.performanceConfig = performanceConfig + } + + private enum CodingKeys: String, CodingKey { + case performanceConfig = "performanceConfig" + } + } + public struct BedrockRerankingConfiguration: AWSEncodableShape { /// Contains configurations for a reranker model. 
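A minimal sketch of the request-level model settings added above. BedrockModelConfigurations(performanceConfig:) is shown in this hunk, while PerformanceConfiguration(latency:) is assumed to mirror the definition added to the Bedrock module earlier in this diff:

    import SotoBedrockAgentRuntime

    // Request the latency-optimized model variant for an agent invocation.
    let modelConfig = BedrockAgentRuntime.BedrockModelConfigurations(
        performanceConfig: .init(latency: .optimized)
    )
    _ = modelConfig // passed as `bedrockModelConfigurations:` on invokeAgent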
public let modelConfiguration: BedrockRerankingModelConfiguration @@ -1980,12 +2013,15 @@ extension BedrockAgentRuntime { public let agentId: String /// The unique identifier of the memory. public let memoryId: String? + /// The unique session identifier of the memory. + public let sessionId: String? @inlinable - public init(agentAliasId: String, agentId: String, memoryId: String? = nil) { + public init(agentAliasId: String, agentId: String, memoryId: String? = nil, sessionId: String? = nil) { self.agentAliasId = agentAliasId self.agentId = agentId self.memoryId = memoryId + self.sessionId = sessionId } public func encode(to encoder: Encoder) throws { @@ -1994,6 +2030,7 @@ extension BedrockAgentRuntime { request.encodePath(self.agentAliasId, key: "agentAliasId") request.encodePath(self.agentId, key: "agentId") request.encodeQuery(self.memoryId, key: "memoryId") + request.encodeQuery(self.sessionId, key: "sessionId") } public func validate(name: String) throws { @@ -2004,6 +2041,9 @@ extension BedrockAgentRuntime { try self.validate(self.memoryId, name: "memoryId", parent: name, max: 100) try self.validate(self.memoryId, name: "memoryId", parent: name, min: 2) try self.validate(self.memoryId, name: "memoryId", parent: name, pattern: "^[0-9a-zA-Z._:-]+$") + try self.validate(self.sessionId, name: "sessionId", parent: name, max: 100) + try self.validate(self.sessionId, name: "sessionId", parent: name, min: 2) + try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[0-9a-zA-Z._:-]+$") } private enum CodingKeys: CodingKey {} @@ -2064,14 +2104,17 @@ extension BedrockAgentRuntime { public let guardrailConfiguration: GuardrailConfiguration? /// Configuration settings for inference when using RetrieveAndGenerate to generate responses while using an external source. public let inferenceConfig: InferenceConfig? + /// The latency configuration for the model. + public let performanceConfig: PerformanceConfiguration? /// Contain the textPromptTemplate string for the external source wrapper object. public let promptTemplate: PromptTemplate? @inlinable - public init(additionalModelRequestFields: [String: String]? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, inferenceConfig: InferenceConfig? = nil, promptTemplate: PromptTemplate? = nil) { + public init(additionalModelRequestFields: [String: String]? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, inferenceConfig: InferenceConfig? = nil, performanceConfig: PerformanceConfiguration? = nil, promptTemplate: PromptTemplate? = nil) { self.additionalModelRequestFields = additionalModelRequestFields self.guardrailConfiguration = guardrailConfiguration self.inferenceConfig = inferenceConfig + self.performanceConfig = performanceConfig self.promptTemplate = promptTemplate } @@ -2088,6 +2131,7 @@ extension BedrockAgentRuntime { case additionalModelRequestFields = "additionalModelRequestFields" case guardrailConfiguration = "guardrailConfiguration" case inferenceConfig = "inferenceConfig" + case performanceConfig = "performanceConfig" case promptTemplate = "promptTemplate" } } @@ -2253,30 +2297,57 @@ extension BedrockAgentRuntime { public struct FlowInput: AWSEncodableShape { /// Contains information about an input into the prompt flow. public let content: FlowInputContent + /// The name of the input from the flow input node. + public let nodeInputName: String? /// The name of the flow input node that begins the prompt flow. 
public let nodeName: String /// The name of the output from the flow input node that begins the prompt flow. - public let nodeOutputName: String + public let nodeOutputName: String? @inlinable - public init(content: FlowInputContent, nodeName: String, nodeOutputName: String) { + public init(content: FlowInputContent, nodeInputName: String? = nil, nodeName: String, nodeOutputName: String? = nil) { self.content = content + self.nodeInputName = nodeInputName self.nodeName = nodeName self.nodeOutputName = nodeOutputName } public func validate(name: String) throws { + try self.validate(self.nodeInputName, name: "nodeInputName", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$") try self.validate(self.nodeName, name: "nodeName", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$") try self.validate(self.nodeOutputName, name: "nodeOutputName", parent: name, pattern: "^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$") } private enum CodingKeys: String, CodingKey { case content = "content" + case nodeInputName = "nodeInputName" case nodeName = "nodeName" case nodeOutputName = "nodeOutputName" } } + public struct FlowMultiTurnInputRequestEvent: AWSDecodableShape { + /// The content payload containing the input request details for the multi-turn interaction. + public let content: FlowMultiTurnInputContent + /// The name of the node in the flow that is requesting the input. + public let nodeName: String + /// The type of the node in the flow that is requesting the input. + public let nodeType: NodeType + + @inlinable + public init(content: FlowMultiTurnInputContent, nodeName: String, nodeType: NodeType) { + self.content = content + self.nodeName = nodeName + self.nodeType = nodeType + } + + private enum CodingKeys: String, CodingKey { + case content = "content" + case nodeName = "nodeName" + case nodeType = "nodeType" + } + } + public struct FlowOutputEvent: AWSDecodableShape { /// The content in the output. public let content: FlowOutputContent @@ -2633,14 +2704,17 @@ extension BedrockAgentRuntime { public let guardrailConfiguration: GuardrailConfiguration? /// Configuration settings for inference when using RetrieveAndGenerate to generate responses while using a knowledge base as a source. public let inferenceConfig: InferenceConfig? + /// The latency configuration for the model. + public let performanceConfig: PerformanceConfiguration? /// Contains the template for the prompt that's sent to the model for response generation. Generation prompts must include the $search_results$ variable. For more information, see Use placeholder variables in the user guide. public let promptTemplate: PromptTemplate? @inlinable - public init(additionalModelRequestFields: [String: String]? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, inferenceConfig: InferenceConfig? = nil, promptTemplate: PromptTemplate? = nil) { + public init(additionalModelRequestFields: [String: String]? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, inferenceConfig: InferenceConfig? = nil, performanceConfig: PerformanceConfiguration? = nil, promptTemplate: PromptTemplate? 
= nil) { self.additionalModelRequestFields = additionalModelRequestFields self.guardrailConfiguration = guardrailConfiguration self.inferenceConfig = inferenceConfig + self.performanceConfig = performanceConfig self.promptTemplate = promptTemplate } @@ -2657,6 +2731,7 @@ extension BedrockAgentRuntime { case additionalModelRequestFields = "additionalModelRequestFields" case guardrailConfiguration = "guardrailConfiguration" case inferenceConfig = "inferenceConfig" + case performanceConfig = "performanceConfig" case promptTemplate = "promptTemplate" } } @@ -3192,6 +3267,20 @@ extension BedrockAgentRuntime { } } + public struct InlineBedrockModelConfigurations: AWSEncodableShape { + /// The latency configuration for the model. + public let performanceConfig: PerformanceConfiguration? + + @inlinable + public init(performanceConfig: PerformanceConfiguration? = nil) { + self.performanceConfig = performanceConfig + } + + private enum CodingKeys: String, CodingKey { + case performanceConfig = "performanceConfig" + } + } + public struct InlineSessionState: AWSEncodableShape { /// Contains information about the files used by code interpreter. public let files: [InputFile]? @@ -3308,6 +3397,8 @@ extension BedrockAgentRuntime { public let agentAliasId: String /// The unique identifier of the agent to use. public let agentId: String + /// Model performance settings for the request. + public let bedrockModelConfigurations: BedrockModelConfigurations? /// Specifies whether to turn on the trace or not to track the agent's reasoning process. For more information, see Trace enablement. public let enableTrace: Bool? /// Specifies whether to end the session with the agent or not. @@ -3322,13 +3413,14 @@ extension BedrockAgentRuntime { public let sessionState: SessionState? /// The ARN of the resource making the request. public let sourceArn: String? - /// Specifies the configurations for streaming. + /// Specifies the configurations for streaming. To use agent streaming, you need permissions to perform the bedrock:InvokeModelWithResponseStream action. public let streamingConfigurations: StreamingConfigurations? @inlinable - public init(agentAliasId: String, agentId: String, enableTrace: Bool? = nil, endSession: Bool? = nil, inputText: String? = nil, memoryId: String? = nil, sessionId: String, sessionState: SessionState? = nil, sourceArn: String? = nil, streamingConfigurations: StreamingConfigurations? = nil) { + public init(agentAliasId: String, agentId: String, bedrockModelConfigurations: BedrockModelConfigurations? = nil, enableTrace: Bool? = nil, endSession: Bool? = nil, inputText: String? = nil, memoryId: String? = nil, sessionId: String, sessionState: SessionState? = nil, sourceArn: String? = nil, streamingConfigurations: StreamingConfigurations? 
= nil) { self.agentAliasId = agentAliasId self.agentId = agentId + self.bedrockModelConfigurations = bedrockModelConfigurations self.enableTrace = enableTrace self.endSession = endSession self.inputText = inputText @@ -3344,6 +3436,7 @@ extension BedrockAgentRuntime { var container = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.agentAliasId, key: "agentAliasId") request.encodePath(self.agentId, key: "agentId") + try container.encodeIfPresent(self.bedrockModelConfigurations, forKey: .bedrockModelConfigurations) try container.encodeIfPresent(self.enableTrace, forKey: .enableTrace) try container.encodeIfPresent(self.endSession, forKey: .endSession) try container.encodeIfPresent(self.inputText, forKey: .inputText) @@ -3372,6 +3465,7 @@ extension BedrockAgentRuntime { } private enum CodingKeys: String, CodingKey { + case bedrockModelConfigurations = "bedrockModelConfigurations" case enableTrace = "enableTrace" case endSession = "endSession" case inputText = "inputText" @@ -3415,31 +3509,42 @@ extension BedrockAgentRuntime { public struct InvokeFlowRequest: AWSEncodableShape { /// Specifies whether to return the trace for the flow or not. Traces track inputs and outputs for nodes in the flow. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock. public let enableTrace: Bool? + /// The unique identifier for the current flow execution. If you don't provide a value, Amazon Bedrock creates the identifier for you. + public let executionId: String? /// The unique identifier of the flow alias. public let flowAliasIdentifier: String /// The unique identifier of the flow. public let flowIdentifier: String /// A list of objects, each containing information about an input into the flow. public let inputs: [FlowInput] + /// Model performance settings for the request. + public let modelPerformanceConfiguration: ModelPerformanceConfiguration? @inlinable - public init(enableTrace: Bool? = nil, flowAliasIdentifier: String, flowIdentifier: String, inputs: [FlowInput]) { + public init(enableTrace: Bool? = nil, executionId: String? = nil, flowAliasIdentifier: String, flowIdentifier: String, inputs: [FlowInput], modelPerformanceConfiguration: ModelPerformanceConfiguration? = nil) { self.enableTrace = enableTrace + self.executionId = executionId self.flowAliasIdentifier = flowAliasIdentifier self.flowIdentifier = flowIdentifier self.inputs = inputs + self.modelPerformanceConfiguration = modelPerformanceConfiguration } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) try container.encodeIfPresent(self.enableTrace, forKey: .enableTrace) + try container.encodeIfPresent(self.executionId, forKey: .executionId) request.encodePath(self.flowAliasIdentifier, key: "flowAliasIdentifier") request.encodePath(self.flowIdentifier, key: "flowIdentifier") try container.encode(self.inputs, forKey: .inputs) + try container.encodeIfPresent(self.modelPerformanceConfiguration, forKey: .modelPerformanceConfiguration) } public func validate(name: String) throws { + try self.validate(self.executionId, name: "executionId", parent: name, max: 100) + try self.validate(self.executionId, name: "executionId", parent: name, min: 2) + try self.validate(self.executionId, name: "executionId", parent: name, pattern: "^[0-9a-zA-Z._:-]+$") try self.validate(self.flowAliasIdentifier, name: "flowAliasIdentifier", parent: name, max: 2048) try self.validate(self.flowAliasIdentifier, name: "flowAliasIdentifier", parent: name, pattern: "^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/[0-9a-zA-Z]{10})|(\\bTSTALIASID\\b|[0-9a-zA-Z]+)$") try self.validate(self.flowIdentifier, name: "flowIdentifier", parent: name, max: 2048) @@ -3453,22 +3558,29 @@ extension BedrockAgentRuntime { private enum CodingKeys: String, CodingKey { case enableTrace = "enableTrace" + case executionId = "executionId" case inputs = "inputs" + case modelPerformanceConfiguration = "modelPerformanceConfiguration" } } public struct InvokeFlowResponse: AWSDecodableShape { public static let _options: AWSShapeOptions = [.rawPayload] + /// The unique identifier for the current flow execution. + public let executionId: String? /// The output of the flow, returned as a stream. If there's an error, the error is returned. public let responseStream: AWSEventStream @inlinable - public init(responseStream: AWSEventStream) { + public init(executionId: String? = nil, responseStream: AWSEventStream) { + self.executionId = executionId self.responseStream = responseStream } public init(from decoder: Decoder) throws { + let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer let container = try decoder.singleValueContainer() + self.executionId = try response.decodeHeaderIfPresent(String.self, key: "x-amz-bedrock-flow-execution-id") self.responseStream = try container.decode(AWSEventStream.self) } @@ -3478,6 +3590,8 @@ extension BedrockAgentRuntime { public struct InvokeInlineAgentRequest: AWSEncodableShape { /// A list of action groups with each action group defining the action the inline agent needs to carry out. public let actionGroups: [AgentActionGroup]? + /// Model settings for the request. + public let bedrockModelConfigurations: InlineBedrockModelConfigurations? /// The Amazon Resource Name (ARN) of the Amazon Web Services KMS key to use to encrypt your inline agent. public let customerEncryptionKeyArn: String? /// Specifies whether to turn on the trace or not to track the agent's reasoning process. For more information, see Using trace. @@ -3502,10 +3616,13 @@ extension BedrockAgentRuntime { public let promptOverrideConfiguration: PromptOverrideConfiguration? /// The unique identifier of the session. Use the same value across requests to continue the same conversation. public let sessionId: String + /// Specifies the configurations for streaming. To use agent streaming, you need permissions to perform the bedrock:InvokeModelWithResponseStream action. + public let streamingConfigurations: StreamingConfigurations? 
@inlinable - public init(actionGroups: [AgentActionGroup]? = nil, customerEncryptionKeyArn: String? = nil, enableTrace: Bool? = nil, endSession: Bool? = nil, foundationModel: String, guardrailConfiguration: GuardrailConfigurationWithArn? = nil, idleSessionTTLInSeconds: Int? = nil, inlineSessionState: InlineSessionState? = nil, inputText: String? = nil, instruction: String, knowledgeBases: [KnowledgeBase]? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, sessionId: String) { + public init(actionGroups: [AgentActionGroup]? = nil, bedrockModelConfigurations: InlineBedrockModelConfigurations? = nil, customerEncryptionKeyArn: String? = nil, enableTrace: Bool? = nil, endSession: Bool? = nil, foundationModel: String, guardrailConfiguration: GuardrailConfigurationWithArn? = nil, idleSessionTTLInSeconds: Int? = nil, inlineSessionState: InlineSessionState? = nil, inputText: String? = nil, instruction: String, knowledgeBases: [KnowledgeBase]? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, sessionId: String, streamingConfigurations: StreamingConfigurations? = nil) { self.actionGroups = actionGroups + self.bedrockModelConfigurations = bedrockModelConfigurations self.customerEncryptionKeyArn = customerEncryptionKeyArn self.enableTrace = enableTrace self.endSession = endSession @@ -3518,12 +3635,14 @@ extension BedrockAgentRuntime { self.knowledgeBases = knowledgeBases self.promptOverrideConfiguration = promptOverrideConfiguration self.sessionId = sessionId + self.streamingConfigurations = streamingConfigurations } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) try container.encodeIfPresent(self.actionGroups, forKey: .actionGroups) + try container.encodeIfPresent(self.bedrockModelConfigurations, forKey: .bedrockModelConfigurations) try container.encodeIfPresent(self.customerEncryptionKeyArn, forKey: .customerEncryptionKeyArn) try container.encodeIfPresent(self.enableTrace, forKey: .enableTrace) try container.encodeIfPresent(self.endSession, forKey: .endSession) @@ -3536,6 +3655,7 @@ extension BedrockAgentRuntime { try container.encodeIfPresent(self.knowledgeBases, forKey: .knowledgeBases) try container.encodeIfPresent(self.promptOverrideConfiguration, forKey: .promptOverrideConfiguration) request.encodePath(self.sessionId, key: "sessionId") + try container.encodeIfPresent(self.streamingConfigurations, forKey: .streamingConfigurations) } public func validate(name: String) throws { @@ -3565,6 +3685,7 @@ extension BedrockAgentRuntime { private enum CodingKeys: String, CodingKey { case actionGroups = "actionGroups" + case bedrockModelConfigurations = "bedrockModelConfigurations" case customerEncryptionKeyArn = "customerEncryptionKeyArn" case enableTrace = "enableTrace" case endSession = "endSession" @@ -3576,6 +3697,7 @@ extension BedrockAgentRuntime { case instruction = "instruction" case knowledgeBases = "knowledgeBases" case promptOverrideConfiguration = "promptOverrideConfiguration" + case streamingConfigurations = "streamingConfigurations" } } @@ -3977,6 +4099,33 @@ extension BedrockAgentRuntime { } } + public struct ModelNotReadyException: AWSDecodableShape { + public let message: String? + + @inlinable + public init(message: String? 
= nil) { + self.message = message + } + + private enum CodingKeys: String, CodingKey { + case message = "message" + } + } + + public struct ModelPerformanceConfiguration: AWSEncodableShape { + /// The latency configuration for the model. + public let performanceConfig: PerformanceConfiguration? + + @inlinable + public init(performanceConfig: PerformanceConfiguration? = nil) { + self.performanceConfig = performanceConfig + } + + private enum CodingKeys: String, CodingKey { + case performanceConfig = "performanceConfig" + } + } + public struct Observation: AWSDecodableShape { /// Contains the JSON-formatted string returned by the API invoked by the action group. public let actionGroupInvocationOutput: ActionGroupInvocationOutput? @@ -4074,15 +4223,18 @@ extension BedrockAgentRuntime { public let additionalModelRequestFields: [String: String]? /// Configuration settings for inference when using RetrieveAndGenerate to generate responses while using a knowledge base as a source. public let inferenceConfig: InferenceConfig? + /// The latency configuration for the model. + public let performanceConfig: PerformanceConfiguration? /// Contains the template for the prompt that's sent to the model. Orchestration prompts must include the $conversation_history$ and $output_format_instructions$ variables. For more information, see Use placeholder variables in the user guide. public let promptTemplate: PromptTemplate? /// To split up the prompt and retrieve multiple sources, set the transformation type to QUERY_DECOMPOSITION. public let queryTransformationConfiguration: QueryTransformationConfiguration? @inlinable - public init(additionalModelRequestFields: [String: String]? = nil, inferenceConfig: InferenceConfig? = nil, promptTemplate: PromptTemplate? = nil, queryTransformationConfiguration: QueryTransformationConfiguration? = nil) { + public init(additionalModelRequestFields: [String: String]? = nil, inferenceConfig: InferenceConfig? = nil, performanceConfig: PerformanceConfiguration? = nil, promptTemplate: PromptTemplate? = nil, queryTransformationConfiguration: QueryTransformationConfiguration? = nil) { self.additionalModelRequestFields = additionalModelRequestFields self.inferenceConfig = inferenceConfig + self.performanceConfig = performanceConfig self.promptTemplate = promptTemplate self.queryTransformationConfiguration = queryTransformationConfiguration } @@ -4099,6 +4251,7 @@ extension BedrockAgentRuntime { private enum CodingKeys: String, CodingKey { case additionalModelRequestFields = "additionalModelRequestFields" case inferenceConfig = "inferenceConfig" + case performanceConfig = "performanceConfig" case promptTemplate = "promptTemplate" case queryTransformationConfiguration = "queryTransformationConfiguration" } @@ -4215,6 +4368,20 @@ extension BedrockAgentRuntime { } } + public struct PerformanceConfiguration: AWSEncodableShape { + /// To use a latency-optimized version of the model, set to optimized. + public let latency: PerformanceConfigLatency? + + @inlinable + public init(latency: PerformanceConfigLatency? = nil) { + self.latency = latency + } + + private enum CodingKeys: String, CodingKey { + case latency = "latency" + } + } + public struct PostProcessingModelInvocationOutput: AWSDecodableShape { /// Contains information about the foundation model output from the post-processing step. public let metadata: Metadata? 
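All of the generation and orchestration configuration shapes in this file gain the same optional performanceConfig, so latency-optimized inference is opted into with one small struct. A hedged call-site sketch (identifiers are placeholders; the streamFinalResponse field on StreamingConfigurations is assumed from the service API rather than shown in this diff):

```swift
// Hypothetical sketch: request latency-optimized inference on an agent call.
// Agent, alias, and session identifiers are placeholders.
let request = BedrockAgentRuntime.InvokeAgentRequest(
    agentAliasId: "TSTALIASID",
    agentId: "AGENT12345",
    bedrockModelConfigurations: .init(
        performanceConfig: .init(latency: .optimized)
    ),
    inputText: "Summarise my open support cases",
    sessionId: "session-0001",
    // Streaming the final response needs bedrock:InvokeModelWithResponseStream;
    // streamFinalResponse is assumed from the service API.
    streamingConfigurations: .init(streamFinalResponse: true)
)
```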
@@ -5739,6 +5906,20 @@ extension BedrockAgentRuntime { } } + public struct FlowMultiTurnInputContent: AWSDecodableShape { + /// The requested additional input to send back to the multi-turn flow node. + public let document: String? + + @inlinable + public init(document: String? = nil) { + self.document = document + } + + private enum CodingKeys: String, CodingKey { + case document = "document" + } + } + public struct FlowOutputContent: AWSDecodableShape { /// The content in the output. public let document: String? @@ -5854,6 +6035,7 @@ public struct BedrockAgentRuntimeErrorType: AWSErrorType { case conflictException = "ConflictException" case dependencyFailedException = "DependencyFailedException" case internalServerException = "InternalServerException" + case modelNotReadyException = "ModelNotReadyException" case resourceNotFoundException = "ResourceNotFoundException" case serviceQuotaExceededException = "ServiceQuotaExceededException" case throttlingException = "ThrottlingException" @@ -5888,6 +6070,8 @@ public struct BedrockAgentRuntimeErrorType: AWSErrorType { public static var dependencyFailedException: Self { .init(.dependencyFailedException) } /// An internal server error occurred. Retry your request. public static var internalServerException: Self { .init(.internalServerException) } + /// The model specified in the request is not ready to serve inference requests. The AWS SDK will automatically retry the operation up to 5 times. For information about configuring automatic retries, see Retry behavior in the AWS SDKs and Tools reference guide. + public static var modelNotReadyException: Self { .init(.modelNotReadyException) } /// The specified resource Amazon Resource Name (ARN) was not found. Check the Amazon Resource Name (ARN) and try your request again. public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } /// The number of requests exceeds the service quota. Resubmit your request later. diff --git a/Sources/Soto/Services/BedrockDataAutomation/BedrockDataAutomation_api.swift b/Sources/Soto/Services/BedrockDataAutomation/BedrockDataAutomation_api.swift index 535abd9495..fa42f1dce2 100644 --- a/Sources/Soto/Services/BedrockDataAutomation/BedrockDataAutomation_api.swift +++ b/Sources/Soto/Services/BedrockDataAutomation/BedrockDataAutomation_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS BedrockDataAutomation service. 
/// -/// Amazon Bedrock Keystone Build +/// Amazon Bedrock Data Automation BuildTime public struct BedrockDataAutomation: AWSService { // MARK: Member variables @@ -80,7 +80,7 @@ public struct BedrockDataAutomation: AWSService { // MARK: API Calls - /// Creates an Amazon Bedrock Keystone Blueprint + /// Creates an Amazon Bedrock Data Automation Blueprint @Sendable @inlinable public func createBlueprint(_ input: CreateBlueprintRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateBlueprintResponse { @@ -93,7 +93,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Creates an Amazon Bedrock Keystone Blueprint + /// Creates an Amazon Bedrock Data Automation Blueprint /// /// Parameters: /// - blueprintName: @@ -124,7 +124,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.createBlueprint(input, logger: logger) } - /// Creates a new version of an existing Amazon Bedrock Keystone Blueprint + /// Creates a new version of an existing Amazon Bedrock Data Automation Blueprint @Sendable @inlinable public func createBlueprintVersion(_ input: CreateBlueprintVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateBlueprintVersionResponse { @@ -137,7 +137,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Creates a new version of an existing Amazon Bedrock Keystone Blueprint + /// Creates a new version of an existing Amazon Bedrock Data Automation Blueprint /// /// Parameters: /// - blueprintArn: ARN generated at the server side when a Blueprint is created @@ -156,7 +156,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.createBlueprintVersion(input, logger: logger) } - /// Creates an Amazon Bedrock Keystone DataAutomationProject + /// Creates an Amazon Bedrock Data Automation Project @Sendable @inlinable public func createDataAutomationProject(_ input: CreateDataAutomationProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataAutomationProjectResponse { @@ -169,7 +169,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Creates an Amazon Bedrock Keystone DataAutomationProject + /// Creates an Amazon Bedrock Data Automation Project /// /// Parameters: /// - clientToken: @@ -206,7 +206,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.createDataAutomationProject(input, logger: logger) } - /// Deletes an existing Amazon Bedrock Keystone Blueprint + /// Deletes an existing Amazon Bedrock Data Automation Blueprint @Sendable @inlinable public func deleteBlueprint(_ input: DeleteBlueprintRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteBlueprintResponse { @@ -219,7 +219,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Deletes an existing Amazon Bedrock Keystone Blueprint + /// Deletes an existing Amazon Bedrock Data Automation Blueprint /// /// Parameters: /// - blueprintArn: ARN generated at the server side when a Blueprint is created @@ -238,7 +238,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.deleteBlueprint(input, logger: logger) } - /// Deletes an existing Amazon Bedrock Keystone DataAutomationProject + /// Deletes an existing Amazon Bedrock Data Automation Project @Sendable @inlinable public func deleteDataAutomationProject(_ input: DeleteDataAutomationProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteDataAutomationProjectResponse { @@ -251,7 +251,7 
@@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Deletes an existing Amazon Bedrock Keystone DataAutomationProject + /// Deletes an existing Amazon Bedrock Data Automation Project /// /// Parameters: /// - projectArn: ARN generated at the server side when a DataAutomationProject is created @@ -267,7 +267,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.deleteDataAutomationProject(input, logger: logger) } - /// Gets an existing Amazon Bedrock Keystone Blueprint + /// Gets an existing Amazon Bedrock Data Automation Blueprint @Sendable @inlinable public func getBlueprint(_ input: GetBlueprintRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetBlueprintResponse { @@ -280,7 +280,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Gets an existing Amazon Bedrock Keystone Blueprint + /// Gets an existing Amazon Bedrock Data Automation Blueprint /// /// Parameters: /// - blueprintArn: ARN generated at the server side when a Blueprint is created @@ -302,7 +302,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.getBlueprint(input, logger: logger) } - /// Gets an existing Amazon Bedrock Keystone DataAutomationProject + /// Gets an existing Amazon Bedrock Data Automation Project @Sendable @inlinable public func getDataAutomationProject(_ input: GetDataAutomationProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataAutomationProjectResponse { @@ -315,7 +315,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Gets an existing Amazon Bedrock Keystone DataAutomationProject + /// Gets an existing Amazon Bedrock Data Automation Project /// /// Parameters: /// - projectArn: ARN generated at the server side when a DataAutomationProject is created @@ -334,7 +334,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.getDataAutomationProject(input, logger: logger) } - /// Lists all existing Amazon Bedrock Keystone Blueprints + /// Lists all existing Amazon Bedrock Data Automation Blueprints @Sendable @inlinable public func listBlueprints(_ input: ListBlueprintsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListBlueprintsResponse { @@ -347,7 +347,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Lists all existing Amazon Bedrock Keystone Blueprints + /// Lists all existing Amazon Bedrock Data Automation Blueprints /// /// Parameters: /// - blueprintArn: @@ -378,7 +378,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.listBlueprints(input, logger: logger) } - /// Lists all existing Amazon Bedrock Keystone DataAutomationProjects + /// Lists all existing Amazon Bedrock Data Automation Projects @Sendable @inlinable public func listDataAutomationProjects(_ input: ListDataAutomationProjectsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDataAutomationProjectsResponse { @@ -391,7 +391,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Lists all existing Amazon Bedrock Keystone DataAutomationProjects + /// Lists all existing Amazon Bedrock Data Automation Projects /// /// Parameters: /// - blueprintFilter: @@ -419,7 +419,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.listDataAutomationProjects(input, logger: logger) } - /// Updates an existing Amazon Bedrock Blueprint + /// Updates an existing Amazon Bedrock Data Automation Blueprint @Sendable @inlinable 
public func updateBlueprint(_ input: UpdateBlueprintRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateBlueprintResponse { @@ -432,7 +432,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Updates an existing Amazon Bedrock Blueprint + /// Updates an existing Amazon Bedrock Data Automation Blueprint /// /// Parameters: /// - blueprintArn: ARN generated at the server side when a Blueprint is created @@ -454,7 +454,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.updateBlueprint(input, logger: logger) } - /// Updates an existing Amazon Bedrock DataAutomationProject + /// Updates an existing Amazon Bedrock Data Automation Project @Sendable @inlinable public func updateDataAutomationProject(_ input: UpdateDataAutomationProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateDataAutomationProjectResponse { @@ -467,7 +467,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Updates an existing Amazon Bedrock DataAutomationProject + /// Updates an existing Amazon Bedrock Data Automation Project /// /// Parameters: /// - customOutputConfiguration: diff --git a/Sources/Soto/Services/BedrockDataAutomationRuntime/BedrockDataAutomationRuntime_api.swift b/Sources/Soto/Services/BedrockDataAutomationRuntime/BedrockDataAutomationRuntime_api.swift index e1be16cbd4..e9c64b3d3f 100644 --- a/Sources/Soto/Services/BedrockDataAutomationRuntime/BedrockDataAutomationRuntime_api.swift +++ b/Sources/Soto/Services/BedrockDataAutomationRuntime/BedrockDataAutomationRuntime_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS BedrockDataAutomationRuntime service. /// -/// Amazon Bedrock Keystone Runtime +/// Amazon Bedrock Data Automation Runtime public struct BedrockDataAutomationRuntime: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift index 504cd55d65..d6bfcd869e 100644 --- a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift @@ -2450,7 +2450,7 @@ extension BedrockRuntime { public func validate(name: String) throws { try self.validate(self.name, name: "name", parent: name, max: 64) try self.validate(self.name, name: "name", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z][a-zA-Z0-9_]*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9_-]+$") } private enum CodingKeys: String, CodingKey { @@ -2652,7 +2652,7 @@ extension BedrockRuntime { try self.validate(self.description, name: "description", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, max: 64) try self.validate(self.name, name: "name", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z][a-zA-Z0-9_]*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9_-]+$") } private enum CodingKeys: String, CodingKey { @@ -2680,7 +2680,7 @@ extension BedrockRuntime { public func validate(name: String) throws { try self.validate(self.name, name: "name", parent: name, max: 64) try self.validate(self.name, name: "name", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z][a-zA-Z0-9_]*$") + try self.validate(self.name, name: "name", parent: name, pattern: 
"^[a-zA-Z0-9_-]+$") try self.validate(self.toolUseId, name: "toolUseId", parent: name, max: 64) try self.validate(self.toolUseId, name: "toolUseId", parent: name, min: 1) try self.validate(self.toolUseId, name: "toolUseId", parent: name, pattern: "^[a-zA-Z0-9_-]+$") diff --git a/Sources/Soto/Services/Billing/Billing_api.swift b/Sources/Soto/Services/Billing/Billing_api.swift index e4cacaf9fc..25000817d9 100644 --- a/Sources/Soto/Services/Billing/Billing_api.swift +++ b/Sources/Soto/Services/Billing/Billing_api.swift @@ -80,6 +80,137 @@ public struct Billing: AWSService { // MARK: API Calls + /// Creates a billing view with the specified billing view attributes. + @Sendable + @inlinable + public func createBillingView(_ input: CreateBillingViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateBillingViewResponse { + try await self.client.execute( + operation: "CreateBillingView", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a billing view with the specified billing view attributes. + /// + /// Parameters: + /// - clientToken: A unique, case-sensitive identifier you specify to ensure idempotency of the request. Idempotency ensures that an API request completes no more than one time. If the original request completes successfully, any subsequent retries complete successfully without performing any further actions with an idempotent request. + /// - dataFilterExpression: See Expression. Billing view only supports LINKED_ACCOUNT and Tags. + /// - description: The description of the billing view. + /// - name: The name of the billing view. + /// - resourceTags: A list of key value map specifying tags associated to the billing view being created. + /// - sourceViews: A list of billing views used as the data source for the custom billing view. + /// - logger: Logger use during operation + @inlinable + public func createBillingView( + clientToken: String? = CreateBillingViewRequest.idempotencyToken(), + dataFilterExpression: Expression? = nil, + description: String? = nil, + name: String, + resourceTags: [ResourceTag]? = nil, + sourceViews: [String], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateBillingViewResponse { + let input = CreateBillingViewRequest( + clientToken: clientToken, + dataFilterExpression: dataFilterExpression, + description: description, + name: name, + resourceTags: resourceTags, + sourceViews: sourceViews + ) + return try await self.createBillingView(input, logger: logger) + } + + /// Deletes the specified billing view. + @Sendable + @inlinable + public func deleteBillingView(_ input: DeleteBillingViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteBillingViewResponse { + try await self.client.execute( + operation: "DeleteBillingView", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes the specified billing view. + /// + /// Parameters: + /// - arn: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - logger: Logger use during operation + @inlinable + public func deleteBillingView( + arn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteBillingViewResponse { + let input = DeleteBillingViewRequest( + arn: arn + ) + return try await self.deleteBillingView(input, logger: logger) + } + + /// Returns the metadata associated to the specified billing view ARN. 
+ @Sendable + @inlinable + public func getBillingView(_ input: GetBillingViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetBillingViewResponse { + try await self.client.execute( + operation: "GetBillingView", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns the metadata associated with the specified billing view ARN. + /// + /// Parameters: + /// - arn: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - logger: Logger use during operation + @inlinable + public func getBillingView( + arn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetBillingViewResponse { + let input = GetBillingViewRequest( + arn: arn + ) + return try await self.getBillingView(input, logger: logger) + } + + /// Returns the resource-based policy document attached to the resource in JSON format. + @Sendable + @inlinable + public func getResourcePolicy(_ input: GetResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetResourcePolicyResponse { + try await self.client.execute( + operation: "GetResourcePolicy", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns the resource-based policy document attached to the resource in JSON format. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the billing view resource to which the policy is attached. + /// - logger: Logger use during operation + @inlinable + public func getResourcePolicy( + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetResourcePolicyResponse { + let input = GetResourcePolicyRequest( + resourceArn: resourceArn + ) + return try await self.getResourcePolicy(input, logger: logger) + } + /// Lists the billing views available for a given time period. Every Amazon Web Services account has a unique PRIMARY billing view that represents the billing data available by default. Accounts that use Billing Conductor also have BILLING_GROUP billing views representing pro forma costs associated with each created billing group. @Sendable @inlinable @@ -97,23 +228,198 @@ public struct Billing: AWSService { /// /// Parameters: /// - activeTimeRange: The time range for the billing views listed. PRIMARY billing view is always listed. BILLING_GROUP billing views are listed for time ranges when the associated billing group resource in Billing Conductor is active. The time range must be within one calendar month. + /// - arns: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - billingViewTypes: The type of billing view. /// - maxResults: The maximum number of billing views to retrieve. Default is 100. /// - nextToken: The pagination token that is used on subsequent calls to list billing views. + /// - ownerAccountId: The list of owners of the billing view. /// - logger: Logger use during operation @inlinable public func listBillingViews( - activeTimeRange: ActiveTimeRange, + activeTimeRange: ActiveTimeRange? = nil, + arns: [String]? = nil, + billingViewTypes: [BillingViewType]? = nil, maxResults: Int? = nil, nextToken: String? = nil, + ownerAccountId: String?
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> ListBillingViewsResponse { let input = ListBillingViewsRequest( activeTimeRange: activeTimeRange, + arns: arns, + billingViewTypes: billingViewTypes, maxResults: maxResults, - nextToken: nextToken + nextToken: nextToken, + ownerAccountId: ownerAccountId ) return try await self.listBillingViews(input, logger: logger) } + + /// Lists the source views (managed Amazon Web Services billing views) associated with the billing view. + @Sendable + @inlinable + public func listSourceViewsForBillingView(_ input: ListSourceViewsForBillingViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSourceViewsForBillingViewResponse { + try await self.client.execute( + operation: "ListSourceViewsForBillingView", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists the source views (managed Amazon Web Services billing views) associated with the billing view. + /// + /// Parameters: + /// - arn: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - maxResults: The number of entries a paginated response contains. + /// - nextToken: The pagination token that is used on subsequent calls to list billing views. + /// - logger: Logger use during operation + @inlinable + public func listSourceViewsForBillingView( + arn: String, + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListSourceViewsForBillingViewResponse { + let input = ListSourceViewsForBillingViewRequest( + arn: arn, + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listSourceViewsForBillingView(input, logger: logger) + } + + /// Lists tags associated with the billing view resource. + @Sendable + @inlinable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + try await self.client.execute( + operation: "ListTagsForResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists tags associated with the billing view resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource. + /// - logger: Logger use during operation + @inlinable + public func listTagsForResource( + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListTagsForResourceResponse { + let input = ListTagsForResourceRequest( + resourceArn: resourceArn + ) + return try await self.listTagsForResource(input, logger: logger) + } + + /// An API operation for adding one or more tags (key-value pairs) to a resource. + @Sendable + @inlinable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { + try await self.client.execute( + operation: "TagResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// An API operation for adding one or more tags (key-value pairs) to a resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource. + /// - resourceTags: A list of tag key value pairs that are associated with the resource. 
+ /// - logger: Logger use during operation + @inlinable + public func tagResource( + resourceArn: String, + resourceTags: [ResourceTag], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> TagResourceResponse { + let input = TagResourceRequest( + resourceArn: resourceArn, + resourceTags: resourceTags + ) + return try await self.tagResource(input, logger: logger) + } + + /// Removes one or more tags from a resource. Specify only tag keys in your request. Don't specify the value. + @Sendable + @inlinable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { + try await self.client.execute( + operation: "UntagResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Removes one or more tags from a resource. Specify only tag keys in your request. Don't specify the value. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource. + /// - resourceTagKeys: A list of tag key value pairs that are associated with the resource. + /// - logger: Logger use during operation + @inlinable + public func untagResource( + resourceArn: String, + resourceTagKeys: [String], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UntagResourceResponse { + let input = UntagResourceRequest( + resourceArn: resourceArn, + resourceTagKeys: resourceTagKeys + ) + return try await self.untagResource(input, logger: logger) + } + + /// An API to update the attributes of the billing view. + @Sendable + @inlinable + public func updateBillingView(_ input: UpdateBillingViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateBillingViewResponse { + try await self.client.execute( + operation: "UpdateBillingView", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// An API to update the attributes of the billing view. + /// + /// Parameters: + /// - arn: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - dataFilterExpression: See Expression. Billing view only supports LINKED_ACCOUNT and Tags. + /// - description: The description of the billing view. + /// - name: The name of the billing view. + /// - logger: Logger use during operation + @inlinable + public func updateBillingView( + arn: String, + dataFilterExpression: Expression? = nil, + description: String? = nil, + name: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateBillingViewResponse { + let input = UpdateBillingViewRequest( + arn: arn, + dataFilterExpression: dataFilterExpression, + description: description, + name: name + ) + return try await self.updateBillingView(input, logger: logger) + } } extension Billing { @@ -151,20 +457,66 @@ extension Billing { /// /// - Parameters: /// - activeTimeRange: The time range for the billing views listed. PRIMARY billing view is always listed. BILLING_GROUP billing views are listed for time ranges when the associated billing group resource in Billing Conductor is active. The time range must be within one calendar month. + /// - arns: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - billingViewTypes: The type of billing view. /// - maxResults: The maximum number of billing views to retrieve. Default is 100. + /// - ownerAccountId: The list of owners of the billing view. 
/// - logger: Logger used for logging @inlinable public func listBillingViewsPaginator( - activeTimeRange: ActiveTimeRange, + activeTimeRange: ActiveTimeRange? = nil, + arns: [String]? = nil, + billingViewTypes: [BillingViewType]? = nil, maxResults: Int? = nil, + ownerAccountId: String? = nil, logger: Logger = AWSClient.loggingDisabled ) -> AWSClient.PaginatorSequence { let input = ListBillingViewsRequest( activeTimeRange: activeTimeRange, - maxResults: maxResults + arns: arns, + billingViewTypes: billingViewTypes, + maxResults: maxResults, + ownerAccountId: ownerAccountId ) return self.listBillingViewsPaginator(input, logger: logger) } + + /// Return PaginatorSequence for operation ``listSourceViewsForBillingView(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listSourceViewsForBillingViewPaginator( + _ input: ListSourceViewsForBillingViewRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listSourceViewsForBillingView, + inputKey: \ListSourceViewsForBillingViewRequest.nextToken, + outputKey: \ListSourceViewsForBillingViewResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listSourceViewsForBillingView(_:logger:)``. + /// + /// - Parameters: + /// - arn: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - maxResults: The number of entries a paginated response contains. + /// - logger: Logger used for logging + @inlinable + public func listSourceViewsForBillingViewPaginator( + arn: String, + maxResults: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListSourceViewsForBillingViewRequest( + arn: arn, + maxResults: maxResults + ) + return self.listSourceViewsForBillingViewPaginator(input, logger: logger) + } } extension Billing.ListBillingViewsRequest: AWSPaginateToken { @@ -172,6 +524,20 @@ extension Billing.ListBillingViewsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Billing.ListBillingViewsRequest { return .init( activeTimeRange: self.activeTimeRange, + arns: self.arns, + billingViewTypes: self.billingViewTypes, + maxResults: self.maxResults, + nextToken: token, + ownerAccountId: self.ownerAccountId + ) + } +} + +extension Billing.ListSourceViewsForBillingViewRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Billing.ListSourceViewsForBillingViewRequest { + return .init( + arn: self.arn, maxResults: self.maxResults, nextToken: token ) diff --git a/Sources/Soto/Services/Billing/Billing_shapes.swift b/Sources/Soto/Services/Billing/Billing_shapes.swift index eaaef4b292..017adfb0fb 100644 --- a/Sources/Soto/Services/Billing/Billing_shapes.swift +++ b/Sources/Soto/Services/Billing/Billing_shapes.swift @@ -28,10 +28,16 @@ extension Billing { public enum BillingViewType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case billingGroup = "BILLING_GROUP" + case custom = "CUSTOM" case primary = "PRIMARY" public var description: String { return self.rawValue } } + public enum Dimension: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case linkedAccount = "LINKED_ACCOUNT" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct ActiveTimeRange: AWSEncodableShape { @@ -52,20 +58,65 @@ extension Billing { } } 
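With activeTimeRange now optional, ListBillingViews can filter purely by view type or owner, and the paginator mirrors that. A usage sketch, assuming an existing `billing: Billing` service object and a `billingViews` array on the response (the response shape is not part of this diff):

```swift
// Hypothetical sketch: page through CUSTOM billing views owned by one account.
// `billing` is an existing Billing service object; the `billingViews` property
// on each page is assumed from the service model, and IDs are placeholders.
let pages = billing.listBillingViewsPaginator(
    billingViewTypes: [.custom],
    ownerAccountId: "111122223333"
)
for try await page in pages {
    for view in page.billingViews {
        print(view.arn ?? "-", view.name ?? "-")
    }
}
```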
+ public struct BillingViewElement: AWSDecodableShape { + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + public let arn: String? + /// The type of billing view. + public let billingViewType: BillingViewType? + /// The time when the billing view was created. + public let createdAt: Date? + /// See Expression. Billing view only supports LINKED_ACCOUNT and Tags. + public let dataFilterExpression: Expression? + /// The description of the billing view. + public let description: String? + /// The name of the billing view. + public let name: String? + /// The list of owners of the billing view. + public let ownerAccountId: String? + /// The time when the billing view was last updated. + public let updatedAt: Date? + + @inlinable + public init(arn: String? = nil, billingViewType: BillingViewType? = nil, createdAt: Date? = nil, dataFilterExpression: Expression? = nil, description: String? = nil, name: String? = nil, ownerAccountId: String? = nil, updatedAt: Date? = nil) { + self.arn = arn + self.billingViewType = billingViewType + self.createdAt = createdAt + self.dataFilterExpression = dataFilterExpression + self.description = description + self.name = name + self.ownerAccountId = ownerAccountId + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case billingViewType = "billingViewType" + case createdAt = "createdAt" + case dataFilterExpression = "dataFilterExpression" + case description = "description" + case name = "name" + case ownerAccountId = "ownerAccountId" + case updatedAt = "updatedAt" + } + } + public struct BillingViewListElement: AWSDecodableShape { /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. public let arn: String? /// The type of billing view. public let billingViewType: BillingViewType? + /// The description of the billing view. + public let description: String? /// A list of names of the Billing view. public let name: String? /// The list of owners of the Billing view. public let ownerAccountId: String? @inlinable - public init(arn: String? = nil, billingViewType: BillingViewType? = nil, name: String? = nil, ownerAccountId: String? = nil) { + public init(arn: String? = nil, billingViewType: BillingViewType? = nil, description: String? = nil, name: String? = nil, ownerAccountId: String? = nil) { self.arn = arn self.billingViewType = billingViewType + self.description = description self.name = name self.ownerAccountId = ownerAccountId } @@ -73,37 +124,286 @@ extension Billing { private enum CodingKeys: String, CodingKey { case arn = "arn" case billingViewType = "billingViewType" + case description = "description" case name = "name" case ownerAccountId = "ownerAccountId" } } + public struct CreateBillingViewRequest: AWSEncodableShape { + /// A unique, case-sensitive identifier you specify to ensure idempotency of the request. Idempotency ensures that an API request completes no more than one time. If the original request completes successfully, any subsequent retries complete successfully without performing any further actions with an idempotent request. + public let clientToken: String? + /// See Expression. Billing view only supports LINKED_ACCOUNT and Tags. + public let dataFilterExpression: Expression? + /// The description of the billing view. + public let description: String? + /// The name of the billing view. + public let name: String + /// A list of key-value pairs specifying tags associated with the billing view being created.
+ public let resourceTags: [ResourceTag]? + /// A list of billing views used as the data source for the custom billing view. + public let sourceViews: [String] + + @inlinable + public init(clientToken: String? = CreateBillingViewRequest.idempotencyToken(), dataFilterExpression: Expression? = nil, description: String? = nil, name: String, resourceTags: [ResourceTag]? = nil, sourceViews: [String]) { + self.clientToken = clientToken + self.dataFilterExpression = dataFilterExpression + self.description = description + self.name = name + self.resourceTags = resourceTags + self.sourceViews = sourceViews + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.clientToken, key: "X-Amzn-Client-Token") + try container.encodeIfPresent(self.dataFilterExpression, forKey: .dataFilterExpression) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encode(self.name, forKey: .name) + try container.encodeIfPresent(self.resourceTags, forKey: .resourceTags) + try container.encode(self.sourceViews, forKey: .sourceViews) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9-]+$") + try self.dataFilterExpression?.validate(name: "\(name).dataFilterExpression") + try self.validate(self.description, name: "description", parent: name, max: 1024) + try self.validate(self.description, name: "description", parent: name, pattern: "^([ a-zA-Z0-9_\\+=\\.\\-@]+)?$") + try self.validate(self.name, name: "name", parent: name, max: 128) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[ a-zA-Z0-9_\\+=\\.\\-@]+$") + try self.resourceTags?.forEach { + try $0.validate(name: "\(name).resourceTags[]") + } + try self.validate(self.resourceTags, name: "resourceTags", parent: name, max: 200) + try self.sourceViews.forEach { + try validate($0, name: "sourceViews[]", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$") + } + try self.validate(self.sourceViews, name: "sourceViews", parent: name, max: 1) + try self.validate(self.sourceViews, name: "sourceViews", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case dataFilterExpression = "dataFilterExpression" + case description = "description" + case name = "name" + case resourceTags = "resourceTags" + case sourceViews = "sourceViews" + } + } + + public struct CreateBillingViewResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + public let arn: String + /// The time when the billing view was created. + public let createdAt: Date? + + @inlinable + public init(arn: String, createdAt: Date? = nil) { + self.arn = arn + self.createdAt = createdAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + } + } + + public struct DeleteBillingViewRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. 
+        public let arn: String
+
+        @inlinable
+        public init(arn: String) {
+            self.arn = arn
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$")
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case arn = "arn"
+        }
+    }
+
+    public struct DeleteBillingViewResponse: AWSDecodableShape {
+        /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.
+        public let arn: String
+
+        @inlinable
+        public init(arn: String) {
+            self.arn = arn
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case arn = "arn"
+        }
+    }
+
+    public struct DimensionValues: AWSEncodableShape & AWSDecodableShape {
+        /// The names of the metadata types that you can use to filter and group your results.
+        public let key: Dimension
+        /// The metadata values that you can use to filter and group your results.
+        public let values: [String]
+
+        @inlinable
+        public init(key: Dimension, values: [String]) {
+            self.key = key
+            self.values = values
+        }
+
+        public func validate(name: String) throws {
+            try self.values.forEach {
+                try validate($0, name: "values[]", parent: name, max: 1024)
+                try validate($0, name: "values[]", parent: name, pattern: "^[\\S\\s]*$")
+            }
+            try self.validate(self.values, name: "values", parent: name, max: 200)
+            try self.validate(self.values, name: "values", parent: name, min: 1)
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case key = "key"
+            case values = "values"
+        }
+    }
+
+    public struct Expression: AWSEncodableShape & AWSDecodableShape {
+        /// The specific Dimension to use for Expression.
+        public let dimensions: DimensionValues?
+        /// The specific Tag to use for Expression.
+        public let tags: TagValues?
+
+        @inlinable
+        public init(dimensions: DimensionValues? = nil, tags: TagValues? = nil) {
+            self.dimensions = dimensions
+            self.tags = tags
+        }
+
+        public func validate(name: String) throws {
+            try self.dimensions?.validate(name: "\(name).dimensions")
+            try self.tags?.validate(name: "\(name).tags")
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case dimensions = "dimensions"
+            case tags = "tags"
+        }
+    }
+
+    public struct GetBillingViewRequest: AWSEncodableShape {
+        /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.
+        public let arn: String
+
+        @inlinable
+        public init(arn: String) {
+            self.arn = arn
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$")
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case arn = "arn"
+        }
+    }
+
+    public struct GetBillingViewResponse: AWSDecodableShape {
+        /// The billing view element associated with the specified ARN.
+        public let billingView: BillingViewElement
+
+        @inlinable
+        public init(billingView: BillingViewElement) {
+            self.billingView = billingView
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case billingView = "billingView"
+        }
+    }
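For orientation, a minimal usage sketch of the custom billing view shapes introduced above. This is a sketch only: it assumes the generated `Billing` service client exposes the usual Soto convenience methods (`createBillingView`, `getBillingView`), that the `Dimension` enum has a `linkedAccount` case for the LINKED_ACCOUNT value the docs mention, and that the account ID and source view ARN are placeholders.

import SotoBilling

// Sketch only: assumes an async context.
let client = AWSClient()
let billing = Billing(client: client)

// Filter the view down to a single linked account (LINKED_ACCOUNT dimension).
let filter = Billing.Expression(
    dimensions: Billing.DimensionValues(key: .linkedAccount, values: ["123456789012"])
)

let created = try await billing.createBillingView(
    dataFilterExpression: filter,
    description: "Spend for the storage team account",
    name: "storage-team-view",
    sourceViews: ["arn:aws:billing::123456789012:billingview/primary"]
)

// Fetch the view back; the response wraps a BillingViewElement.
let fetched = try await billing.getBillingView(arn: created.arn)
print(fetched.billingView.name ?? "unnamed")

try await client.shutdown()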
+
+    public struct GetResourcePolicyRequest: AWSEncodableShape {
+        /// The Amazon Resource Name (ARN) of the billing view resource to which the policy is attached.
+        public let resourceArn: String
+
+        @inlinable
+        public init(resourceArn: String) {
+            self.resourceArn = resourceArn
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:[a-zA-Z0-9/:_\\+=\\.\\@-]{0,70}[a-zA-Z0-9]$")
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case resourceArn = "resourceArn"
+        }
+    }
+
+    public struct GetResourcePolicyResponse: AWSDecodableShape {
+        /// The resource-based policy document attached to the resource in JSON format.
+        public let policy: String?
+        /// The Amazon Resource Name (ARN) of the billing view resource to which the policy is attached.
+        public let resourceArn: String
+
+        @inlinable
+        public init(policy: String? = nil, resourceArn: String) {
+            self.policy = policy
+            self.resourceArn = resourceArn
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case policy = "policy"
+            case resourceArn = "resourceArn"
+        }
+    }
+
     public struct ListBillingViewsRequest: AWSEncodableShape {
         /// The time range for the billing views listed. PRIMARY billing view is always listed. BILLING_GROUP billing views are listed for time ranges when the associated billing group resource in Billing Conductor is active. The time range must be within one calendar month.
-        public let activeTimeRange: ActiveTimeRange
+        public let activeTimeRange: ActiveTimeRange?
+        /// The list of Amazon Resource Names (ARNs) used to identify the billing views.
+        public let arns: [String]?
+        /// The types of the billing views to list.
+        public let billingViewTypes: [BillingViewType]?
         /// The maximum number of billing views to retrieve. Default is 100.
         public let maxResults: Int?
         /// The pagination token that is used on subsequent calls to list billing views.
         public let nextToken: String?
+        /// The account ID of the billing view owner.
+        public let ownerAccountId: String?
 
         @inlinable
-        public init(activeTimeRange: ActiveTimeRange, maxResults: Int? = nil, nextToken: String? = nil) {
+        public init(activeTimeRange: ActiveTimeRange? = nil, arns: [String]? = nil, billingViewTypes: [BillingViewType]? = nil, maxResults: Int? = nil, nextToken: String? = nil, ownerAccountId: String?
= nil) { self.activeTimeRange = activeTimeRange + self.arns = arns + self.billingViewTypes = billingViewTypes self.maxResults = maxResults self.nextToken = nextToken + self.ownerAccountId = ownerAccountId } public func validate(name: String) throws { + try self.arns?.forEach { + try validate($0, name: "arns[]", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$") + } + try self.validate(self.arns, name: "arns", parent: name, max: 10) try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2047) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.ownerAccountId, name: "ownerAccountId", parent: name, pattern: "^[0-9]{12}$") } private enum CodingKeys: String, CodingKey { case activeTimeRange = "activeTimeRange" + case arns = "arns" + case billingViewTypes = "billingViewTypes" case maxResults = "maxResults" case nextToken = "nextToken" + case ownerAccountId = "ownerAccountId" } } @@ -124,6 +424,254 @@ extension Billing { case nextToken = "nextToken" } } + + public struct ListSourceViewsForBillingViewRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + public let arn: String + /// The number of entries a paginated response contains. + public let maxResults: Int? + /// The pagination token that is used on subsequent calls to list billing views. + public let nextToken: String? + + @inlinable + public init(arn: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.arn = arn + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2047) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case maxResults = "maxResults" + case nextToken = "nextToken" + } + } + + public struct ListSourceViewsForBillingViewResponse: AWSDecodableShape { + /// The pagination token that is used on subsequent calls to list billing views. + public let nextToken: String? + /// A list of billing views used as the data source for the custom billing view. + public let sourceViews: [String] + + @inlinable + public init(nextToken: String? = nil, sourceViews: [String]) { + self.nextToken = nextToken + self.sourceViews = sourceViews + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case sourceViews = "sourceViews" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. 
+ public let resourceArn: String + + @inlinable + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:[a-zA-Z0-9/:_\\+=\\.\\@-]{0,70}[a-zA-Z0-9]$") + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "resourceArn" + } + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// A list of tag key value pairs that are associated with the resource. + public let resourceTags: [ResourceTag]? + + @inlinable + public init(resourceTags: [ResourceTag]? = nil) { + self.resourceTags = resourceTags + } + + private enum CodingKeys: String, CodingKey { + case resourceTags = "resourceTags" + } + } + + public struct ResourceTag: AWSEncodableShape & AWSDecodableShape { + /// The key that's associated with the tag. + public let key: String + /// The value that's associated with the tag. + public let value: String? + + @inlinable + public init(key: String, value: String? = nil) { + self.key = key + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.key, name: "key", parent: name, max: 128) + try self.validate(self.key, name: "key", parent: name, min: 1) + try self.validate(self.value, name: "value", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case key = "key" + case value = "value" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + /// A list of tag key value pairs that are associated with the resource. + public let resourceTags: [ResourceTag] + + @inlinable + public init(resourceArn: String, resourceTags: [ResourceTag]) { + self.resourceArn = resourceArn + self.resourceTags = resourceTags + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:[a-zA-Z0-9/:_\\+=\\.\\@-]{0,70}[a-zA-Z0-9]$") + try self.resourceTags.forEach { + try $0.validate(name: "\(name).resourceTags[]") + } + try self.validate(self.resourceTags, name: "resourceTags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "resourceArn" + case resourceTags = "resourceTags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct TagValues: AWSEncodableShape & AWSDecodableShape { + /// The key for the tag. + public let key: String + /// The specific value of the tag. + public let values: [String] + + @inlinable + public init(key: String, values: [String]) { + self.key = key + self.values = values + } + + public func validate(name: String) throws { + try self.validate(self.key, name: "key", parent: name, max: 1024) + try self.validate(self.key, name: "key", parent: name, pattern: "^[\\S\\s]*$") + try self.values.forEach { + try validate($0, name: "values[]", parent: name, max: 1024) + try validate($0, name: "values[]", parent: name, pattern: "^[\\S\\s]*$") + } + try self.validate(self.values, name: "values", parent: name, max: 200) + try self.validate(self.values, name: "values", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case key = "key" + case values = "values" + } + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. 
+        public let resourceArn: String
+        /// A list of tag keys associated with the resource.
+        public let resourceTagKeys: [String]
+
+        @inlinable
+        public init(resourceArn: String, resourceTagKeys: [String]) {
+            self.resourceArn = resourceArn
+            self.resourceTagKeys = resourceTagKeys
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:[a-zA-Z0-9/:_\\+=\\.\\@-]{0,70}[a-zA-Z0-9]$")
+            try self.resourceTagKeys.forEach {
+                try validate($0, name: "resourceTagKeys[]", parent: name, max: 128)
+                try validate($0, name: "resourceTagKeys[]", parent: name, min: 1)
+            }
+            try self.validate(self.resourceTagKeys, name: "resourceTagKeys", parent: name, max: 200)
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case resourceArn = "resourceArn"
+            case resourceTagKeys = "resourceTagKeys"
+        }
+    }
+
+    public struct UntagResourceResponse: AWSDecodableShape {
+        public init() {}
+    }
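Continuing the earlier sketch (reusing the `billing` client from it), resource tagging for billing views follows the standard tag/untag pattern defined by the shapes above. The `tagResource` and `untagResource` convenience methods are assumed to mirror the request shapes, and the ARN is a placeholder.

// Sketch only: tag a custom billing view, then remove the tag by key.
let viewArn = "arn:aws:billing::123456789012:billingview/custom-example"

_ = try await billing.tagResource(
    resourceArn: viewArn,
    resourceTags: [Billing.ResourceTag(key: "team", value: "storage")]
)

_ = try await billing.untagResource(
    resourceArn: viewArn,
    resourceTagKeys: ["team"]
)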
+
+    public struct UpdateBillingViewRequest: AWSEncodableShape {
+        /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.
+        public let arn: String
+        /// See Expression. Billing view only supports LINKED_ACCOUNT and Tags.
+        public let dataFilterExpression: Expression?
+        /// The description of the billing view.
+        public let description: String?
+        /// The name of the billing view.
+        public let name: String?
+
+        @inlinable
+        public init(arn: String, dataFilterExpression: Expression? = nil, description: String? = nil, name: String? = nil) {
+            self.arn = arn
+            self.dataFilterExpression = dataFilterExpression
+            self.description = description
+            self.name = name
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$")
+            try self.dataFilterExpression?.validate(name: "\(name).dataFilterExpression")
+            try self.validate(self.description, name: "description", parent: name, max: 1024)
+            try self.validate(self.description, name: "description", parent: name, pattern: "^([ a-zA-Z0-9_\\+=\\.\\-@]+)?$")
+            try self.validate(self.name, name: "name", parent: name, max: 128)
+            try self.validate(self.name, name: "name", parent: name, min: 1)
+            try self.validate(self.name, name: "name", parent: name, pattern: "^[ a-zA-Z0-9_\\+=\\.\\-@]+$")
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case arn = "arn"
+            case dataFilterExpression = "dataFilterExpression"
+            case description = "description"
+            case name = "name"
+        }
+    }
+
+    public struct UpdateBillingViewResponse: AWSDecodableShape {
+        /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.
+        public let arn: String
+        /// The time when the billing view was last updated.
+        public let updatedAt: Date?
+
+        @inlinable
+        public init(arn: String, updatedAt: Date? = nil) {
+            self.arn = arn
+            self.updatedAt = updatedAt
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case arn = "arn"
+            case updatedAt = "updatedAt"
+        }
+    }
 }
 
 // MARK: - Errors
 
@@ -132,7 +680,10 @@ extension Billing {
 public struct BillingErrorType: AWSErrorType {
     enum Code: String {
         case accessDeniedException = "AccessDeniedException"
+        case conflictException = "ConflictException"
         case internalServerException = "InternalServerException"
+        case resourceNotFoundException = "ResourceNotFoundException"
+        case serviceQuotaExceededException = "ServiceQuotaExceededException"
         case throttlingException = "ThrottlingException"
         case validationException = "ValidationException"
     }
@@ -157,8 +708,14 @@ public struct BillingErrorType: AWSErrorType {
     /// You don't have sufficient access to perform this action.
     public static var accessDeniedException: Self { .init(.accessDeniedException) }
+    /// The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the conflict before retrying this request.
+    public static var conflictException: Self { .init(.conflictException) }
     /// The request processing failed because of an unknown error, exception, or failure.
     public static var internalServerException: Self { .init(.internalServerException) }
+    /// The specified ARN in the request doesn't exist.
+    public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) }
+    /// You've reached the limit of resources you can create, or exceeded the size of an individual resource.
+    public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) }
     /// The request was denied due to request throttling.
     public static var throttlingException: Self { .init(.throttlingException) }
     /// The input fails to satisfy the constraints specified by an Amazon Web Services service.
diff --git a/Sources/Soto/Services/Budgets/Budgets_api.swift b/Sources/Soto/Services/Budgets/Budgets_api.swift
index d8f20643e3..3d77962561 100644
--- a/Sources/Soto/Services/Budgets/Budgets_api.swift
+++ b/Sources/Soto/Services/Budgets/Budgets_api.swift
@@ -80,6 +80,8 @@ public struct Budgets: AWSService {
         "aws-cn-global": "budgets.amazonaws.com.cn",
         "aws-global": "budgets.amazonaws.com",
         "aws-iso-b-global": "budgets.global.sc2s.sgov.gov",
+        "aws-iso-global": "budgets.c2s.ic.gov",
+        "us-iso-east-1": "budgets.c2s.ic.gov",
         "us-isob-east-1": "budgets.global.sc2s.sgov.gov"
     ]}
 
@@ -87,6 +89,7 @@ public struct Budgets: AWSService {
     static var partitionEndpoints: [AWSPartition: (endpoint: String, region: SotoCore.Region)] {[
         .aws: (endpoint: "aws-global", region: .useast1),
         .awscn: (endpoint: "aws-cn-global", region: .cnnorthwest1),
+        .awsiso: (endpoint: "aws-iso-global", region: .usisoeast1),
         .awsisob: (endpoint: "aws-iso-b-global", region: .usisobeast1)
     ]}
 
diff --git a/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_api.swift b/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_api.swift
index 364ecfa9c2..77f4e100fd 100644
--- a/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_api.swift
+++ b/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_api.swift
@@ -582,16 +582,19 @@ public struct CloudHSMV2: AWSService {
     /// Parameters:
     ///   - backupRetentionPolicy: A policy that defines how the service retains backups.
     ///   - clusterId: The identifier (ID) of the cluster that you want to modify. To find the cluster ID, use DescribeClusters.
+    ///   - hsmType: The desired HSM type of the cluster.
     ///   - logger: Logger use during operation
     @inlinable
     public func modifyCluster(
-        backupRetentionPolicy: BackupRetentionPolicy,
+        backupRetentionPolicy: BackupRetentionPolicy? = nil,
         clusterId: String,
+        hsmType: String? = nil,
         logger: Logger = AWSClient.loggingDisabled
     ) async throws -> ModifyClusterResponse {
         let input = ModifyClusterRequest(
             backupRetentionPolicy: backupRetentionPolicy,
-            clusterId: clusterId
+            clusterId: clusterId,
+            hsmType: hsmType
         )
         return try await self.modifyCluster(input, logger: logger)
     }
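With backupRetentionPolicy now optional, the new hsmType parameter makes ModifyCluster usable for HSM type changes on an existing cluster. A minimal sketch, assuming an async context; the cluster ID and target type are placeholders, and "hsm2m.medium" is simply one value matching the pattern validated in the shapes below.

import SotoCloudHSMV2

let client = AWSClient()
let cloudHSM = CloudHSMV2(client: client)

// Request an HSM type change; backupRetentionPolicy can now be omitted.
let response = try await cloudHSM.modifyCluster(
    clusterId: "cluster-abc234defg56",
    hsmType: "hsm2m.medium"
)
print(response)

try await client.shutdown()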
diff --git a/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift b/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift
index ac54a387eb..d8a7bdfa31 100644
--- a/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift
+++ b/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift
@@ -217,9 +217,11 @@ extension CloudHSMV2 {
         public let hsms: [Hsm]?
         /// The type of HSM that the cluster contains.
         public let hsmType: String?
+        /// The timestamp until when the cluster can be rolled back to its original HSM type.
+        public let hsmTypeRollbackExpiration: Date?
         /// The mode of the cluster.
         public let mode: ClusterMode?
-        /// The cluster's NetworkType can be set to either IPV4 (which is the default) or DUALSTACK. When set to IPV4, communication between your application and the Hardware Security Modules (HSMs) is restricted to the IPv4 protocol only. In contrast, the DUALSTACK network type enables communication over both the IPv4 and IPv6 protocols. To use the DUALSTACK option, you'll need to configure your Virtual Private Cloud (VPC) and subnets to support both IPv4 and IPv6. This involves adding IPv6 Classless Inter-Domain Routing (CIDR) blocks to the existing IPv4 CIDR blocks in your subnets. The choice between IPV4 and DUALSTACK network types determines the flexibility of the network addressing setup for your cluster. The DUALSTACK option provides more flexibility by allowing both IPv4 and IPv6 communication.
+        /// The cluster's NetworkType can be IPv4 (the default) or DUALSTACK. The IPv4 NetworkType restricts communication between your application and the hardware security modules (HSMs) to the IPv4 protocol only. The DUALSTACK NetworkType enables communication over both IPv4 and IPv6 protocols. To use DUALSTACK, configure your virtual private cloud (VPC) and subnets to support both IPv4 and IPv6. This configuration involves adding IPv6 Classless Inter-Domain Routing (CIDR) blocks to the existing IPv4 CIDR blocks in your subnets. The NetworkType you choose affects the network addressing options for your cluster. DUALSTACK provides more flexibility by supporting both IPv4 and IPv6 communication.
         public let networkType: NetworkType?
         /// The default password for the cluster's Pre-Crypto Officer (PRECO) user.
         public let preCoPassword: String?
@@ -239,7 +241,7 @@ extension CloudHSMV2 {
         public let vpcId: String?
 
         @inlinable
-        public init(backupPolicy: BackupPolicy? = nil, backupRetentionPolicy: BackupRetentionPolicy? = nil, certificates: Certificates? = nil, clusterId: String? = nil, createTimestamp: Date? = nil, hsms: [Hsm]? = nil, hsmType: String? = nil, mode: ClusterMode? = nil, networkType: NetworkType? = nil, preCoPassword: String? = nil, securityGroup: String? = nil, sourceBackupId: String? = nil, state: ClusterState? = nil, stateMessage: String? = nil, subnetMapping: [String: String]? = nil, tagList: [Tag]? = nil, vpcId: String? = nil) {
+        public init(backupPolicy: BackupPolicy? = nil, backupRetentionPolicy: BackupRetentionPolicy? = nil, certificates: Certificates? = nil, clusterId: String? = nil, createTimestamp: Date? = nil, hsms: [Hsm]? = nil, hsmType: String? = nil, hsmTypeRollbackExpiration: Date? = nil, mode: ClusterMode? = nil, networkType: NetworkType? = nil, preCoPassword: String? = nil, securityGroup: String? = nil, sourceBackupId: String? = nil, state: ClusterState? = nil, stateMessage: String? = nil, subnetMapping: [String: String]? = nil, tagList: [Tag]? = nil, vpcId: String? = nil) {
             self.backupPolicy = backupPolicy
             self.backupRetentionPolicy = backupRetentionPolicy
             self.certificates = certificates
@@ -247,6 +249,7 @@ extension CloudHSMV2 {
             self.createTimestamp = createTimestamp
             self.hsms = hsms
             self.hsmType = hsmType
+            self.hsmTypeRollbackExpiration = hsmTypeRollbackExpiration
             self.mode = mode
             self.networkType = networkType
             self.preCoPassword = preCoPassword
@@ -267,6 +270,7 @@ extension CloudHSMV2 {
             case createTimestamp = "CreateTimestamp"
             case hsms = "Hsms"
             case hsmType = "HsmType"
+            case hsmTypeRollbackExpiration = "HsmTypeRollbackExpiration"
             case mode = "Mode"
             case networkType = "NetworkType"
             case preCoPassword = "PreCoPassword"
@@ -766,6 +770,8 @@ extension CloudHSMV2 {
         public let eniIpV6: String?
         /// The HSM's identifier (ID).
         public let hsmId: String
+        /// The type of HSM.
+        public let hsmType: String?
         /// The HSM's state.
         public let state: HsmState?
         /// A description of the HSM's state.
@@ -774,13 +780,14 @@ extension CloudHSMV2 {
         public let subnetId: String?
 
         @inlinable
-        public init(availabilityZone: String? = nil, clusterId: String? = nil, eniId: String? = nil, eniIp: String? = nil, eniIpV6: String? = nil, hsmId: String, state: HsmState? = nil, stateMessage: String? = nil, subnetId: String? = nil) {
+        public init(availabilityZone: String? = nil, clusterId: String? = nil, eniId: String? = nil, eniIp: String? = nil, eniIpV6: String? = nil, hsmId: String, hsmType: String? = nil, state: HsmState? = nil, stateMessage: String? = nil, subnetId: String? = nil) {
             self.availabilityZone = availabilityZone
             self.clusterId = clusterId
             self.eniId = eniId
             self.eniIp = eniIp
             self.eniIpV6 = eniIpV6
             self.hsmId = hsmId
+            self.hsmType = hsmType
             self.state = state
             self.stateMessage = stateMessage
             self.subnetId = subnetId
@@ -793,6 +800,7 @@ extension CloudHSMV2 {
             case eniIp = "EniIp"
             case eniIpV6 = "EniIpV6"
             case hsmId = "HsmId"
+            case hsmType = "HsmType"
             case state = "State"
             case stateMessage = "StateMessage"
             case subnetId = "SubnetId"
@@ -932,24 +940,30 @@ extension CloudHSMV2 {
 
     public struct ModifyClusterRequest: AWSEncodableShape {
         /// A policy that defines how the service retains backups.
-        public let backupRetentionPolicy: BackupRetentionPolicy
+        public let backupRetentionPolicy: BackupRetentionPolicy?
         /// The identifier (ID) of the cluster that you want to modify. To find the cluster ID, use DescribeClusters.
         public let clusterId: String
+        /// The desired HSM type of the cluster.
+        public let hsmType: String?
 
         @inlinable
-        public init(backupRetentionPolicy: BackupRetentionPolicy, clusterId: String) {
+        public init(backupRetentionPolicy: BackupRetentionPolicy? = nil, clusterId: String, hsmType: String? = nil) {
             self.backupRetentionPolicy = backupRetentionPolicy
             self.clusterId = clusterId
+            self.hsmType = hsmType
         }
 
         public func validate(name: String) throws {
-            try self.backupRetentionPolicy.validate(name: "\(name).backupRetentionPolicy")
+            try self.backupRetentionPolicy?.validate(name: "\(name).backupRetentionPolicy")
             try self.validate(self.clusterId, name: "clusterId", parent: name, pattern: "^cluster-[2-7a-zA-Z]{11,16}$")
+            try self.validate(self.hsmType, name: "hsmType", parent: name, max: 32)
+            try self.validate(self.hsmType, name: "hsmType", parent: name, pattern: "^((p|)hsm[0-9][a-z.]*\\.[a-zA-Z]+)$")
         }
 
         private enum CodingKeys: String, CodingKey {
             case backupRetentionPolicy = "BackupRetentionPolicy"
             case clusterId = "ClusterId"
+            case hsmType = "HsmType"
         }
     }
 
diff --git a/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift b/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift
index 504ba3868d..ddef842c03 100644
--- a/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift
+++ b/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift
@@ -841,7 +841,7 @@ public struct CloudTrail: AWSService {
         return try await self.getImport(input, logger: logger)
     }
 
-    /// Describes the settings for the Insights event selectors that you configured for your trail or event data store. GetInsightSelectors shows if CloudTrail Insights event logging is enabled on the trail or event data store, and if it is, which Insights types are enabled. If you run GetInsightSelectors on a trail or event data store that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException Specify either the EventDataStore parameter to get Insights event selectors for an event data store, or the TrailName parameter to the get Insights event selectors for a trail. You cannot specify these parameters together. For more information, see Logging CloudTrail Insights events in the CloudTrail User Guide.
+    /// Describes the settings for the Insights event selectors that you configured for your trail or event data store. GetInsightSelectors shows if CloudTrail Insights event logging is enabled on the trail or event data store, and if it is, which Insights types are enabled. If you run GetInsightSelectors on a trail or event data store that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException. Specify either the EventDataStore parameter to get Insights event selectors for an event data store, or the TrailName parameter to get the Insights event selectors for a trail. You cannot specify these parameters together. For more information, see Working with CloudTrail Insights in the CloudTrail User Guide.
     @Sendable
     @inlinable
     public func getInsightSelectors(_ input: GetInsightSelectorsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetInsightSelectorsResponse {
         try await self.client.execute(
             operation: "GetInsightSelectors",
             path: "/",
             httpMethod: .POST,
             serviceConfig: self.config,
             input: input,
             logger: logger
         )
     }
-    /// Describes the settings for the Insights event selectors that you configured for your trail or event data store. GetInsightSelectors shows if CloudTrail Insights event logging is enabled on the trail or event data store, and if it is, which Insights types are enabled. If you run GetInsightSelectors on a trail or event data store that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException Specify either the EventDataStore parameter to get Insights event selectors for an event data store, or the TrailName parameter to the get Insights event selectors for a trail. You cannot specify these parameters together. For more information, see Logging CloudTrail Insights events in the CloudTrail User Guide.
+    /// Describes the settings for the Insights event selectors that you configured for your trail or event data store. GetInsightSelectors shows if CloudTrail Insights event logging is enabled on the trail or event data store, and if it is, which Insights types are enabled. If you run GetInsightSelectors on a trail or event data store that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException. Specify either the EventDataStore parameter to get Insights event selectors for an event data store, or the TrailName parameter to get the Insights event selectors for a trail. You cannot specify these parameters together. For more information, see Working with CloudTrail Insights in the CloudTrail User Guide.
     ///
     /// Parameters:
     ///   - eventDataStore: Specifies the ARN (or ID suffix of the ARN) of the event data store for which you want to get Insights selectors. You cannot use this parameter with the TrailName parameter.
@@ -1413,7 +1413,7 @@ public struct CloudTrail: AWSService {
         return try await self.lookupEvents(input, logger: logger)
     }
 
-    /// Configures event selectors (also referred to as basic event selectors) or advanced event selectors for your trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. You can use AdvancedEventSelectors to log management events, data events for all resource types, and network activity events. You can use EventSelectors to log management events and data events for the following resource types: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object You can't use EventSelectors to log network activity events. If you want your trail to log Insights events, be sure the event selector or advanced event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events or network activity events. When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. Example You create an event selector for a trail and specify that you want to log write-only events. The EC2 GetConsoleOutput and RunInstances API operations occur in your account. CloudTrail evaluates whether the events match your event selectors. The RunInstances is a write-only event and it matches your event selector. The trail logs the event. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event.
The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown. You can configure up to five event selectors for each trail. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. For more information, see Logging management events, Logging data events, Logging network activity events, and Quotas in CloudTrail in the CloudTrail User Guide. + /// Configures event selectors (also referred to as basic event selectors) or advanced event selectors for your trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. You can use AdvancedEventSelectors to log management events, data events for all resource types, and network activity events. You can use EventSelectors to log management events and data events for the following resource types: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object You can't use EventSelectors to log network activity events. If you want your trail to log Insights events, be sure the event selector or advanced event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Working with CloudTrail Insights in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events or network activity events. When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. Example You create an event selector for a trail and specify that you want to log write-only events. The EC2 GetConsoleOutput and RunInstances API operations occur in your account. CloudTrail evaluates whether the events match your event selectors. The RunInstances is a write-only event and it matches your event selector. The trail logs the event. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event. The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown. You can configure up to five event selectors for each trail. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. For more information, see Logging management events, Logging data events, Logging network activity events, and Quotas in CloudTrail in the CloudTrail User Guide. @Sendable @inlinable public func putEventSelectors(_ input: PutEventSelectorsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutEventSelectorsResponse { @@ -1426,7 +1426,7 @@ public struct CloudTrail: AWSService { logger: logger ) } - /// Configures event selectors (also referred to as basic event selectors) or advanced event selectors for your trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. 
You can use AdvancedEventSelectors to log management events, data events for all resource types, and network activity events. You can use EventSelectors to log management events and data events for the following resource types: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object You can't use EventSelectors to log network activity events. If you want your trail to log Insights events, be sure the event selector or advanced event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events or network activity events. When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. Example You create an event selector for a trail and specify that you want to log write-only events. The EC2 GetConsoleOutput and RunInstances API operations occur in your account. CloudTrail evaluates whether the events match your event selectors. The RunInstances is a write-only event and it matches your event selector. The trail logs the event. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event. The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown. You can configure up to five event selectors for each trail. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. For more information, see Logging management events, Logging data events, Logging network activity events, and Quotas in CloudTrail in the CloudTrail User Guide. + /// Configures event selectors (also referred to as basic event selectors) or advanced event selectors for your trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. You can use AdvancedEventSelectors to log management events, data events for all resource types, and network activity events. You can use EventSelectors to log management events and data events for the following resource types: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object You can't use EventSelectors to log network activity events. If you want your trail to log Insights events, be sure the event selector or advanced event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Working with CloudTrail Insights in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events or network activity events. When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. 
Example You create an event selector for a trail and specify that you want to log write-only events. The EC2 GetConsoleOutput and RunInstances API operations occur in your account. CloudTrail evaluates whether the events match your event selectors. The RunInstances is a write-only event and it matches your event selector. The trail logs the event. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event. The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown. You can configure up to five event selectors for each trail. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. For more information, see Logging management events, Logging data events, Logging network activity events, and Quotas in CloudTrail in the CloudTrail User Guide. /// /// Parameters: /// - advancedEventSelectors: Specifies the settings for advanced event selectors. You can use advanced event selectors to log management events, data events for all resource types, and network activity events. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events and Logging network activity events in the CloudTrail User Guide. @@ -1448,7 +1448,7 @@ public struct CloudTrail: AWSService { return try await self.putEventSelectors(input, logger: logger) } - /// Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail or event data store. You also use PutInsightSelectors to turn off Insights event logging, by passing an empty list of Insights types. The valid Insights event types are ApiErrorRateInsight and ApiCallRateInsight. To enable Insights on an event data store, you must specify the ARNs (or ID suffix of the ARNs) for the source event data store (EventDataStore) and the destination event data store (InsightsDestination). The source event data store logs management events and enables Insights. The destination event data store logs Insights events based upon the management event activity of the source event data store. The source and destination event data stores must belong to the same Amazon Web Services account. To log Insights events for a trail, you must specify the name (TrailName) of the CloudTrail trail for which you want to change or add Insights selectors. To log CloudTrail Insights events on API call volume, the trail or event data store must log write management events. To log CloudTrail Insights events on API error rate, the trail or event data store must log read or write management events. You can call GetEventSelectors on a trail to check whether the trail logs management events. You can call GetEventDataStore on an event data store to check whether the event data store logs management events. For more information, see Logging CloudTrail Insights events in the CloudTrail User Guide. + /// Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail or event data store. 
You also use PutInsightSelectors to turn off Insights event logging, by passing an empty list of Insights types. The valid Insights event types are ApiErrorRateInsight and ApiCallRateInsight. To enable Insights on an event data store, you must specify the ARNs (or ID suffix of the ARNs) for the source event data store (EventDataStore) and the destination event data store (InsightsDestination). The source event data store logs management events and enables Insights. The destination event data store logs Insights events based upon the management event activity of the source event data store. The source and destination event data stores must belong to the same Amazon Web Services account. To log Insights events for a trail, you must specify the name (TrailName) of the CloudTrail trail for which you want to change or add Insights selectors. To log CloudTrail Insights events on API call volume, the trail or event data store must log write management events. To log CloudTrail Insights events on API error rate, the trail or event data store must log read or write management events. You can call GetEventSelectors on a trail to check whether the trail logs management events. You can call GetEventDataStore on an event data store to check whether the event data store logs management events. For more information, see Working with CloudTrail Insights in the CloudTrail User Guide. @Sendable @inlinable public func putInsightSelectors(_ input: PutInsightSelectorsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutInsightSelectorsResponse { @@ -1461,7 +1461,7 @@ public struct CloudTrail: AWSService { logger: logger ) } - /// Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail or event data store. You also use PutInsightSelectors to turn off Insights event logging, by passing an empty list of Insights types. The valid Insights event types are ApiErrorRateInsight and ApiCallRateInsight. To enable Insights on an event data store, you must specify the ARNs (or ID suffix of the ARNs) for the source event data store (EventDataStore) and the destination event data store (InsightsDestination). The source event data store logs management events and enables Insights. The destination event data store logs Insights events based upon the management event activity of the source event data store. The source and destination event data stores must belong to the same Amazon Web Services account. To log Insights events for a trail, you must specify the name (TrailName) of the CloudTrail trail for which you want to change or add Insights selectors. To log CloudTrail Insights events on API call volume, the trail or event data store must log write management events. To log CloudTrail Insights events on API error rate, the trail or event data store must log read or write management events. You can call GetEventSelectors on a trail to check whether the trail logs management events. You can call GetEventDataStore on an event data store to check whether the event data store logs management events. For more information, see Logging CloudTrail Insights events in the CloudTrail User Guide. + /// Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail or event data store. You also use PutInsightSelectors to turn off Insights event logging, by passing an empty list of Insights types. The valid Insights event types are ApiErrorRateInsight and ApiCallRateInsight. 
To enable Insights on an event data store, you must specify the ARNs (or ID suffix of the ARNs) for the source event data store (EventDataStore) and the destination event data store (InsightsDestination). The source event data store logs management events and enables Insights. The destination event data store logs Insights events based upon the management event activity of the source event data store. The source and destination event data stores must belong to the same Amazon Web Services account. To log Insights events for a trail, you must specify the name (TrailName) of the CloudTrail trail for which you want to change or add Insights selectors. To log CloudTrail Insights events on API call volume, the trail or event data store must log write management events. To log CloudTrail Insights events on API error rate, the trail or event data store must log read or write management events. You can call GetEventSelectors on a trail to check whether the trail logs management events. You can call GetEventDataStore on an event data store to check whether the event data store logs management events. For more information, see Working with CloudTrail Insights in the CloudTrail User Guide. /// /// Parameters: /// - eventDataStore: The ARN (or ID suffix of the ARN) of the source event data store for which you want to change or add Insights selectors. To enable Insights on an event data store, you must provide both the EventDataStore and InsightsDestination parameters. You cannot use this parameter with the TrailName parameter. @@ -1608,6 +1608,41 @@ public struct CloudTrail: AWSService { return try await self.restoreEventDataStore(input, logger: logger) } + /// Searches sample queries and returns a list of sample queries that are sorted by relevance. To search for sample queries, provide a natural language SearchPhrase in English. + @Sendable + @inlinable + public func searchSampleQueries(_ input: SearchSampleQueriesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchSampleQueriesResponse { + try await self.client.execute( + operation: "SearchSampleQueries", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Searches sample queries and returns a list of sample queries that are sorted by relevance. To search for sample queries, provide a natural language SearchPhrase in English. + /// + /// Parameters: + /// - maxResults: The maximum number of results to return on a single page. The default value is 10. + /// - nextToken: A token you can use to get the next page of results. The length constraint is in characters, not words. + /// - searchPhrase: The natural language phrase to use for the semantic search. The phrase must be in English. The length constraint is in characters, not words. + /// - logger: Logger use during operation + @inlinable + public func searchSampleQueries( + maxResults: Int? = nil, + nextToken: String? = nil, + searchPhrase: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> SearchSampleQueriesResponse { + let input = SearchSampleQueriesRequest( + maxResults: maxResults, + nextToken: nextToken, + searchPhrase: searchPhrase + ) + return try await self.searchSampleQueries(input, logger: logger) + } + /// Starts a refresh of the specified dashboard. /// Each time a dashboard is refreshed, CloudTrail runs queries to populate the dashboard's widgets. CloudTrail must be granted permissions to run the StartQuery operation on your behalf. 
To provide permissions, run the PutResourcePolicy operation to attach a resource-based policy to each event data store. For more information, see Example: Allow CloudTrail to run queries to populate a dashboard in the CloudTrail User Guide. @Sendable diff --git a/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift b/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift index 292de6c06e..6a0ddfdb8c 100644 --- a/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift +++ b/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift @@ -221,7 +221,7 @@ extension CloudTrail { public let endsWith: [String]? /// An operator that includes events that match the exact value of the event record field specified as the value of Field. This is the only valid operator that you can use with the readOnly, eventCategory, and resources.type fields. public let equals: [String]? - /// A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported. For more information, see AdvancedFieldSelector in the CloudTrailUser Guide. + /// A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported. For more information, see AdvancedFieldSelector in the CloudTrail API Reference. Selectors don't support the use of wildcards like * . To match multiple values with a single condition, you may use StartsWith, EndsWith, NotStartsWith, or NotEndsWith to explicitly match the beginning or end of the event field. public let field: String /// An operator that excludes events that match the last few characters of the event record field specified as the value of Field. public let notEndsWith: [String]? @@ -3336,6 +3336,83 @@ extension CloudTrail { } } + public struct SearchSampleQueriesRequest: AWSEncodableShape { + /// The maximum number of results to return on a single page. The default value is 10. + public let maxResults: Int? + /// A token you can use to get the next page of results. The length constraint is in characters, not words. + public let nextToken: String? + /// The natural language phrase to use for the semantic search. The phrase must be in English. The length constraint is in characters, not words. + public let searchPhrase: String + + @inlinable + public init(maxResults: Int? = nil, nextToken: String? 
= nil, searchPhrase: String) { + self.maxResults = maxResults + self.nextToken = nextToken + self.searchPhrase = searchPhrase + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1000) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 4) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: ".*") + try self.validate(self.searchPhrase, name: "searchPhrase", parent: name, max: 1000) + try self.validate(self.searchPhrase, name: "searchPhrase", parent: name, min: 2) + try self.validate(self.searchPhrase, name: "searchPhrase", parent: name, pattern: "^[ -~\\n]*$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case nextToken = "NextToken" + case searchPhrase = "SearchPhrase" + } + } + + public struct SearchSampleQueriesResponse: AWSDecodableShape { + /// A token you can use to get the next page of results. + public let nextToken: String? + /// A list of objects containing the search results ordered from most relevant to least relevant. + public let searchResults: [SearchSampleQueriesSearchResult]? + + @inlinable + public init(nextToken: String? = nil, searchResults: [SearchSampleQueriesSearchResult]? = nil) { + self.nextToken = nextToken + self.searchResults = searchResults + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case searchResults = "SearchResults" + } + } + + public struct SearchSampleQueriesSearchResult: AWSDecodableShape { + /// A longer description of a sample query. + public let description: String? + /// The name of a sample query. + public let name: String? + /// A value between 0 and 1 indicating the similarity between the search phrase and result. + public let relevance: Float? + /// The SQL code of the sample query. + public let sql: String? + + @inlinable + public init(description: String? = nil, name: String? = nil, relevance: Float? = nil, sql: String? = nil) { + self.description = description + self.name = name + self.relevance = relevance + self.sql = sql + } + + private enum CodingKeys: String, CodingKey { + case description = "Description" + case name = "Name" + case relevance = "Relevance" + case sql = "SQL" + } + } + public struct SourceConfig: AWSDecodableShape { /// The advanced event selectors that are configured for the channel. public let advancedEventSelectors: [AdvancedEventSelector]? 
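A short sketch of the new SearchSampleQueries operation, using the convenience method and shapes defined above; it assumes an async context, and the search phrase is illustrative.

import SotoCloudTrail

let client = AWSClient()
let cloudTrail = CloudTrail(client: client)

// Semantic search over CloudTrail Lake sample queries.
let response = try await cloudTrail.searchSampleQueries(
    maxResults: 5,
    searchPhrase: "failed console sign-in attempts"
)
for result in response.searchResults ?? [] {
    // Results are ordered from most to least relevant (relevance is 0...1).
    print(result.relevance ?? 0, result.name ?? "", result.sql ?? "")
}

try await client.shutdown()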
diff --git a/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_api.swift b/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_api.swift index 4e1cd74835..a091875a77 100644 --- a/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_api.swift +++ b/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_api.swift @@ -94,6 +94,7 @@ public struct CloudWatchLogs: AWSService { "ap-southeast-3": "logs.ap-southeast-3.api.aws", "ap-southeast-4": "logs.ap-southeast-4.api.aws", "ap-southeast-5": "logs.ap-southeast-5.api.aws", + "ap-southeast-7": "logs.ap-southeast-7.api.aws", "ca-central-1": "logs.ca-central-1.api.aws", "ca-west-1": "logs.ca-west-1.api.aws", "eu-central-1": "logs.eu-central-1.api.aws", @@ -107,6 +108,7 @@ public struct CloudWatchLogs: AWSService { "il-central-1": "logs.il-central-1.api.aws", "me-central-1": "logs.me-central-1.api.aws", "me-south-1": "logs.me-south-1.api.aws", + "mx-central-1": "logs.mx-central-1.api.aws", "sa-east-1": "logs.sa-east-1.api.aws", "us-east-1": "logs.us-east-1.api.aws", "us-east-2": "logs.us-east-2.api.aws", @@ -127,7 +129,7 @@ public struct CloudWatchLogs: AWSService { // MARK: API Calls - /// Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account. When you use AssociateKmsKey, you specify either the logGroupName parameter or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation. Specify the logGroupName parameter to cause all log events stored in the log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key. Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested. Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey operation with the resourceIdentifier parameter that specifies a query-result resource. Specify the resourceIdentifier parameter with a query-result resource, to use that key to encrypt the stored results of all future StartQuery operations in the account. The response from a GetQueryResults operation will still return the query results in plain text. Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method. If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used. If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable. CloudWatch Logs supports only symmetric KMS keys. Do not use an associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys. It can take up to 5 minutes for this operation to take effect. 
If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error. + /// Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account. When you use AssociateKmsKey, you specify either the logGroupName parameter or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation. Specify the logGroupName parameter to cause log events ingested into that log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key. Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested. Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey operation with the resourceIdentifier parameter that specifies a query-result resource. Specify the resourceIdentifier parameter with a query-result resource, to use that key to encrypt the stored results of all future StartQuery operations in the account. The response from a GetQueryResults operation will still return the query results in plain text. Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method. If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used. If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable. CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys. It can take up to 5 minutes for this operation to take effect. If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error. @Sendable @inlinable public func associateKmsKey(_ input: AssociateKmsKeyRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -140,7 +142,7 @@ public struct CloudWatchLogs: AWSService { logger: logger ) } - /// Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account. When you use AssociateKmsKey, you specify either the logGroupName parameter or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation. Specify the logGroupName parameter to cause all log events stored in the log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key. Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key.
After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested. Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey operation with the resourceIdentifier parameter that specifies a query-result resource. Specify the resourceIdentifier parameter with a query-result resource, to use that key to encrypt the stored results of all future StartQuery operations in the account. The response from a GetQueryResults operation will still return the query results in plain text. Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method. If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used. If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable. CloudWatch Logs supports only symmetric KMS keys. Do not use an associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys. It can take up to 5 minutes for this operation to take effect. If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error. + /// Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account. When you use AssociateKmsKey, you specify either the logGroupName parameter or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation. Specify the logGroupName parameter to cause log events ingested into that log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key. Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested. Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey operation with the resourceIdentifier parameter that specifies a query-result resource. Specify the resourceIdentifier parameter with a query-result resource, to use that key to encrypt the stored results of all future StartQuery operations in the account. The response from a GetQueryResults operation will still return the query results in plain text. Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method. 
If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used. If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable. CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys. It can take up to 5 minutes for this operation to take effect. If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error. /// /// Parameters: /// - kmsKeyId: The Amazon Resource Name (ARN) of the KMS key to use when encrypting log data. This must be a symmetric KMS key. For more information, see Amazon Resource Names and Using Symmetric and Asymmetric Keys. @@ -235,7 +237,7 @@ public struct CloudWatchLogs: AWSService { return try await self.createDelivery(input, logger: logger) } - /// Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination. Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported. Exporting to S3 buckets that are encrypted with AES-256 is supported. This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask. You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects. Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities. + /// Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination. Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported. Exporting to S3 buckets that are encrypted with AES-256 is supported. This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask. You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.
We recommend that you don't regularly export to Amazon S3 as a way to continuously archive your logs. For that use case, we instead recommend that you use subscriptions. For more information about subscriptions, see Real-time processing of log data with subscriptions. Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities. @Sendable @inlinable public func createExportTask(_ input: CreateExportTaskRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateExportTaskResponse { @@ -248,7 +250,7 @@ logger: logger ) } - /// Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination. Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported. Exporting to S3 buckets that are encrypted with AES-256 is supported. This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask. You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects. Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities. + /// Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination. Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported. Exporting to S3 buckets that are encrypted with AES-256 is supported. This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask. You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects. We recommend that you don't regularly export to Amazon S3 as a way to continuously archive your logs. For that use case, we instead recommend that you use subscriptions. For more information about subscriptions, see Real-time processing of log data with subscriptions. Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.
/// /// Parameters: /// - destination: The name of S3 bucket for the exported log data. The bucket must be in the same Amazon Web Services Region. @@ -460,7 +462,7 @@ public struct CloudWatchLogs: AWSService { return try await self.deleteDataProtectionPolicy(input, logger: logger) } - /// Deletes s delivery. A delivery is a connection between a logical delivery source and a logical delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source. + /// Deletes a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source. @Sendable @inlinable public func deleteDelivery(_ input: DeleteDeliveryRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -473,7 +475,7 @@ public struct CloudWatchLogs: AWSService { logger: logger ) } - /// Deletes s delivery. A delivery is a connection between a logical delivery source and a logical delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source. + /// Deletes a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source. /// /// Parameters: /// - id: The unique ID of the delivery to delete. You can find the ID of a delivery with the DescribeDeliveries operation. @@ -936,7 +938,7 @@ public struct CloudWatchLogs: AWSService { return try await self.deleteTransformer(input, logger: logger) } - /// Returns a list of all CloudWatch Logs account policies in the account. + /// Returns a list of all CloudWatch Logs account policies in the account. To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are retrieving information for. To see data protection policies, you must have the logs:GetDataProtectionPolicy and logs:DescribeAccountPolicies permissions. To see subscription filter policies, you must have the logs:DescribeSubscriptionFilters and logs:DescribeAccountPolicies permissions. To see transformer policies, you must have the logs:GetTransformer and logs:DescribeAccountPolicies permissions. To see field index policies, you must have the logs:DescribeIndexPolicies and logs:DescribeAccountPolicies permissions.
To see subscription filter policies, you must have the logs:DescribeSubscriptionFilters and logs:DescribeAccountPolicies permissions. To see transformer policies, you must have the logs:GetTransformer and logs:DescribeAccountPolicies permissions. To see field index policies, you must have the logs:DescribeIndexPolicies and logs:DescribeAccountPolicies permissions. /// /// Parameters: /// - accountIdentifiers: If you are using an account that is set up as a monitoring account for CloudWatch unified cross-account observability, you can use this to specify the account ID of a source account. If you do, the operation returns the account policy for the specified account. Currently, you can specify only one account ID in this parameter. If you omit this parameter, only the policy in the current account is returned. @@ -1298,7 +1300,7 @@ public struct CloudWatchLogs: AWSService { return try await self.describeLogGroups(input, logger: logger) } - /// Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered. You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both. This operation has a limit of five transactions per second, after which transactions are throttled. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability. + /// Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered. You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both. This operation has a limit of 25 transactions per second, after which transactions are throttled. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability. @Sendable @inlinable public func describeLogStreams(_ input: DescribeLogStreamsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeLogStreamsResponse { @@ -1311,7 +1313,7 @@ public struct CloudWatchLogs: AWSService { logger: logger ) } - /// Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered. You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both. This operation has a limit of five transactions per second, after which transactions are throttled. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability. + /// Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered. You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both.
This operation has a limit of 25 transactions per second, after which transactions are throttled. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability. /// /// Parameters: /// - descending: If the value is true, results are returned in descending order. If the value is to false, results are returned in ascending order. The default value is false. @@ -2210,7 +2212,7 @@ public struct CloudWatchLogs: AWSService { return try await self.listTagsLogGroup(input, logger: logger) } - /// Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. 
A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission. Transformer policy Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters. You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region. A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use. Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies. You can create transformers only for the log groups in the Standard log class. You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging. You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer. Field index policy You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. 
Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs To find the fields that are in your log group events, use the GetLogGroupFields operation. For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId in [value, value, ...] will attempt to process only the log events where the indexed field matches the specified value. Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId won't match a log event containing requestId. You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging. If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts. If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy. + /// Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account. To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are creating. To create a data protection policy, you must have the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. To create a subscription filter policy, you must have the logs:PutSubscriptionFilter and logs:PutAccountPolicy permissions. To create a transformer policy, you must have the logs:PutTransformer and logs:PutAccountPolicy permissions. To create a field index policy, you must have the logs:PutIndexPolicy and logs:PutAccountPolicy permissions. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask permission can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. A Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission. Transformer policy Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters. You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region. A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use.
Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies. You can create transformers only for the log groups in the Standard log class. You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another transformer policy filtered to my-logpprod or my-logging. You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer. Field index policy You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs. To find the fields that are in your log group events, use the GetLogGroupFields operation. For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId in [value, value, ...] will attempt to process only the log events where the indexed field matches the specified value. Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId won't match a log event containing requestId. You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging. If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts. If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy.
If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy. @Sendable @inlinable public func putAccountPolicy(_ input: PutAccountPolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutAccountPolicyResponse { @@ -2223,7 +2225,7 @@ public struct CloudWatchLogs: AWSService { logger: logger ) } - /// Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. 
Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission. Transformer policy Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters. You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region. A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use. Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies. You can create transformers only for the log groups in the Standard log class. You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging. You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer. Field index policy You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. 
For more information, see Create field indexes to improve query performance and reduce costs To find the fields that are in your log group events, use the GetLogGroupFields operation. For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId in [value, value, ...] will attempt to process only the log events where the indexed field matches the specified value. Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId won't match a log event containing requestId. You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging. If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts. If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy. + /// Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account. To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are creating. To create a data protection policy, you must have the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. To create a subscription filter policy, you must have the logs:PutSubscriptionFilter and logs:PutAccountPolicy permissions. To create a transformer policy, you must have the logs:PutTransformer and logs:PutAccountPolicy permissions. To create a field index policy, you must have the logs:PutIndexPolicy and logs:PutAccountPolicy permissions. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events.
Users with the logs:Unmask permission can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. A Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission. Transformer policy Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters. You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region. A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use. Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs.
CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies. You can create transformers only for the log groups in the Standard log class. You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another transformer policy filtered to my-logpprod or my-logging. You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer. Field index policy You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs. To find the fields that are in your log group events, use the GetLogGroupFields operation. For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId in [value, value, ...] will attempt to process only the log events where the indexed field matches the specified value. Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId won't match a log event containing requestId. You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging. If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts. If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy.
/// /// Parameters: /// - policyDocument: Specify the policy, in JSON. Data protection policy A data protection policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters long. Subscription filter policy A subscription filter policy can include the following attributes in a JSON block: DestinationArn The ARN of the destination to deliver log events to. Supported destinations are: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery. FilterPattern A filter pattern for subscribing to a filtered stream of log events. Distribution The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream. Transformer policy A transformer policy must include one JSON block with the array of processors and their configurations. For more information about available processors, see Processors that you can use. Field index policy A field index filter policy can include the following attribute in a JSON block: Fields The array of field indexes to create. It must contain at least one field index. The following is an example of an index policy document that creates two indexes, RequestId and TransactionId. 
"policyDocument": "{ \"Fields\": [ \"RequestId\", \"TransactionId\" ] }" @@ -2283,7 +2285,7 @@ public struct CloudWatchLogs: AWSService { return try await self.putDataProtectionPolicy(input, logger: logger) } - /// Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify. + /// Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination in the same account of the actual delivery destination. The delivery destination that you create is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. 
If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify. @Sendable @inlinable public func putDeliveryDestination(_ input: PutDeliveryDestinationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutDeliveryDestinationResponse { @@ -2296,7 +2298,7 @@ public struct CloudWatchLogs: AWSService { logger: logger ) } - /// Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify. + /// Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination in the same account as the actual delivery destination. The delivery destination that you create is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
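To tie the PutDeliverySource / PutDeliveryDestination / CreateDelivery workflow together, here is a hedged sketch against Soto's generated convenience methods. All names and ARNs are illustrative placeholders, and the response shapes (notably `deliveryDestination?.arn`) are assumed from the service model.

```swift
import SotoCloudWatchLogs

// Sketch of the three-step vended-log delivery workflow:
// source -> destination -> delivery. ARNs are placeholders.
func configureDelivery(logs: CloudWatchLogs) async throws {
    // 1. Wrap the resource that emits the logs in a delivery source.
    _ = try await logs.putDeliverySource(
        logType: "APPLICATION_LOGS",
        name: "bedrock-source",
        resourceArn: "arn:aws:bedrock:us-east-1:123456789012:application" // placeholder
    )
    // 2. Wrap the resource that receives the logs in a delivery destination.
    let destination = try await logs.putDeliveryDestination(
        deliveryDestinationConfiguration: .init(
            destinationResourceArn: "arn:aws:s3:::amzn-s3-demo-bucket" // placeholder
        ),
        name: "s3-destination"
    )
    // 3. Pair exactly one delivery source with one delivery destination.
    guard let destinationArn = destination.deliveryDestination?.arn else { return }
    _ = try await logs.createDelivery(
        deliveryDestinationArn: destinationArn,
        deliverySourceName: "bedrock-source"
    )
}
```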
Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify. /// /// Parameters: /// - deliveryDestinationConfiguration: A structure that contains the ARN of the Amazon Web Services resource that will receive the logs. @@ -2369,7 +2371,7 @@ public struct CloudWatchLogs: AWSService { /// Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following: Use PutDeliverySource to create a delivery source, which is a logical object that represents the resource that is actually sending the logs. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify. /// /// Parameters: - /// - logType: Defines the type of log that the source is sending. For Amazon Bedrock, the valid value is APPLICATION_LOGS. For Amazon CodeWhisperer, the valid value is EVENT_LOGS. For IAM Identity Center, the valid value is ERROR_LOGS. For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS. + /// - logType: Defines the type of log that the source is sending. For Amazon Bedrock, the valid value is APPLICATION_LOGS. For CloudFront, the valid value is ACCESS_LOGS. For Amazon CodeWhisperer, the valid value is EVENT_LOGS. For Elemental MediaPackage, the valid values are EGRESS_ACCESS_LOGS and INGRESS_ACCESS_LOGS. For Elemental MediaTailor, the valid values are AD_DECISION_SERVER_LOGS, MANIFEST_SERVICE_LOGS, and TRANSCODE_LOGS. For IAM Identity Center, the valid value is ERROR_LOGS. For Amazon Q, the valid value is EVENT_LOGS. For Amazon SES mail manager, the valid value is APPLICATION_LOG. For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, WORKMAIL_MAILBOX_ACCESS_LOGS, and WORKMAIL_PERSONAL_ACCESS_TOKEN_LOGS.
/// - name: A name for this delivery source. This name must be unique for all delivery sources in your account. /// - resourceArn: The ARN of the Amazon Web Services resource that is generating and sending logs. For example, arn:aws:workmail:us-east-1:123456789012:organization/m-1234EXAMPLEabcd1234abcd1234abcd1234 /// - tags: An optional list of key-value pairs to associate with the resource. For more information about tagging, see Tagging Amazon Web Services resources @@ -2572,7 +2574,7 @@ public struct CloudWatchLogs: AWSService { return try await self.putLogEvents(input, logger: logger) } - /// Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents. The maximum number of metric filters that can be associated with a log group is 100. Using regular expressions to create metric filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in metric filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail. When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created. Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress or requestID as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric. CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour. You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges. + /// Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents. The maximum number of metric filters that can be associated with a log group is 100. Using regular expressions in filter patterns is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in filter patterns, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail. When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created. Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress or requestID as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric. CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour. You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges. 
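Following the guidance above about avoiding high-cardinality dimensions, a metric filter call might look like the sketch below (assuming Soto's generated putMetricFilter convenience method; the log group, namespace, and metric names are placeholders).

```swift
import SotoCloudWatchLogs

// Sketch: count log events containing "ERROR" as a custom metric,
// deliberately using no dimensions so each matching event increments
// a single low-cardinality metric. All names are placeholders.
func createErrorCountFilter(logs: CloudWatchLogs) async throws {
    try await logs.putMetricFilter(
        filterName: "error-count",
        filterPattern: "ERROR",
        logGroupName: "/my/app/logs",
        metricTransformations: [
            .init(
                defaultValue: 0,
                metricName: "ErrorCount",
                metricNamespace: "MyApp",
                metricValue: "1"
            )
        ]
    )
}
```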
@Sendable @inlinable public func putMetricFilter(_ input: PutMetricFilterRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -2585,7 +2587,7 @@ public struct CloudWatchLogs: AWSService { logger: logger ) } - /// Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents. The maximum number of metric filters that can be associated with a log group is 100. Using regular expressions to create metric filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in metric filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail. When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created. Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress or requestID as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric. CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour. You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges. + /// Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents. The maximum number of metric filters that can be associated with a log group is 100. Using regular expressions in filter patterns is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in filter patterns, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail. When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created. Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress or requestID as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric. CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour. You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges. /// /// Parameters: /// - applyOnTransformedLogs: This parameter is valid only for log groups that have an active log transformer. For more information about log transformers, see PutTransformer. 
If the log group uses either a log-group level or account-level transformer, and you specify true, the metric filter will be applied on the transformed version of the log events instead of the original ingested log events. @@ -2721,7 +2723,7 @@ public struct CloudWatchLogs: AWSService { return try await self.putRetentionPolicy(input, logger: logger) } - /// Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. Using regular expressions to create subscription filters is supported. For these filters, there is a quotas of quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in subscription filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission. + /// Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. A Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. Using regular expressions in filter patterns is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group.
For more information about using regular expressions in filter patterns, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission. @Sendable @inlinable public func putSubscriptionFilter(_ input: PutSubscriptionFilterRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -2734,7 +2736,7 @@ public struct CloudWatchLogs: AWSService { logger: logger ) } - /// Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. Using regular expressions to create subscription filters is supported. For these filters, there is a quotas of quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in subscription filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission. + /// Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. A Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. Using regular expressions in filter patterns is supported.
For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in filter patterns, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission. /// /// Parameters: /// - applyOnTransformedLogs: This parameter is valid only for log groups that have an active log transformer. For more information about log transformers, see PutTransformer. If the log group uses either a log-group level or account-level transformer, and you specify true, the subscription filter will be applied on the transformed version of the log events instead of the original ingested log events. diff --git a/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_shapes.swift b/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_shapes.swift index a32d8975c1..1415b26d84 100644 --- a/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_shapes.swift +++ b/Sources/Soto/Services/CloudWatchLogs/CloudWatchLogs_shapes.swift @@ -4351,9 +4351,9 @@ extension CloudWatchLogs { public struct OpenSearchResourceConfig: AWSEncodableShape { /// If you want to use an existing OpenSearch Service application for your integration with OpenSearch Service, specify it here. If you omit this, a new application will be created. public let applicationArn: String? - /// Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards. In addition to specifying these users here, you must also grant them the CloudWatchOpenSearchDashboardsAccess IAM policy. For more information, see + /// Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards. In addition to specifying these users here, you must also grant them the CloudWatchOpenSearchDashboardAccess IAM policy. For more information, see IAM policies for users. public let dashboardViewerPrincipals: [String] - /// Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service collection to be able to create the dashboards. For more information about the permissions needed, see Create an IAM role to access the OpenSearch Service collection in the CloudWatch Logs User Guide. + /// Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service collection to be able to create the dashboards. For more information about the permissions needed, see Permissions that the integration needs in the CloudWatch Logs User Guide. public let dataSourceRoleArn: String /// To have the vended dashboard data encrypted with KMS instead of the CloudWatch Logs default encryption method, specify the ARN of the KMS key that you want to use. public let kmsKeyArn: String? @@ -4967,7 +4967,7 @@ extension CloudWatchLogs { } public struct PutDeliverySourceRequest: AWSEncodableShape { - /// Defines the type of log that the source is sending. For Amazon Bedrock, the valid value is APPLICATION_LOGS. For Amazon CodeWhisperer, the valid value is EVENT_LOGS. For IAM Identity Center, the valid value is ERROR_LOGS.
For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS. + /// Defines the type of log that the source is sending. For Amazon Bedrock, the valid value is APPLICATION_LOGS. For CloudFront, the valid value is ACCESS_LOGS. For Amazon CodeWhisperer, the valid value is EVENT_LOGS. For Elemental MediaPackage, the valid values are EGRESS_ACCESS_LOGS and INGRESS_ACCESS_LOGS. For Elemental MediaTailor, the valid values are AD_DECISION_SERVER_LOGS, MANIFEST_SERVICE_LOGS, and TRANSCODE_LOGS. For IAM Identity Center, the valid value is ERROR_LOGS. For Amazon Q, the valid value is EVENT_LOGS. For Amazon SES mail manager, the valid value is APPLICATION_LOG. For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, WORKMAIL_MAILBOX_ACCESS_LOGS, and WORKMAIL_PERSONAL_ACCESS_TOKEN_LOGS. public let logType: String /// A name for this delivery source. This name must be unique for all delivery sources in your account. public let name: String @@ -5766,7 +5766,7 @@ extension CloudWatchLogs { public struct S3DeliveryConfiguration: AWSEncodableShape & AWSDecodableShape { /// This parameter causes the S3 objects that contain delivered logs to use a prefix structure that allows for integration with Apache Hive. public let enableHiveCompatiblePath: Bool? - /// This string allows re-configuring the S3 object prefix to contain either static or variable sections. The valid variables to use in the suffix path will vary by each log source. See ConfigurationTemplate$allowedSuffixPathFields for more info on what values are supported in the suffix path for each log source. + /// This string allows re-configuring the S3 object prefix to contain either static or variable sections. The valid variables to use in the suffix path will vary by each log source. To find the values supported for the suffix path for each log source, use the DescribeConfigurationTemplates operation and check the allowedSuffixPathFields field in the response. public let suffixPath: String? @inlinable @@ -6712,7 +6712,7 @@ public struct CloudWatchLogsErrorType: AWSErrorType { public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } /// The service cannot complete the request. public static var serviceUnavailableException: Self { .init(.serviceUnavailableException) } - /// his exception is returned if an unknown error occurs during a Live Tail session. + /// This exception is returned if an unknown error occurs during a Live Tail session. public static var sessionStreamingException: Self { .init(.sessionStreamingException) } /// This exception is returned in a Live Tail stream when the Live Tail session times out. Live Tail sessions time out after three hours. public static var sessionTimeoutException: Self { .init(.sessionTimeoutException) } diff --git a/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift b/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift index 806e120751..2b1f332cd4 100644 --- a/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift +++ b/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift @@ -1587,7 +1587,7 @@ public struct CodeBuild: AWSService { /// - projectName: The name of the CodeBuild build project to start running a build. /// - queuedTimeoutInMinutesOverride: The number of minutes a build is allowed to be queued before it times out. /// - registryCredentialOverride: The credentials for access to a private registry. 
- /// - reportBuildStatusOverride: Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is thrown. To be able to report the build status to the source provider, the user associated with the source provider must + /// - reportBuildStatusOverride: Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket, an invalidInputException is thrown. To be able to report the build status to the source provider, the user associated with the source provider must /// - secondaryArtifactsOverride: An array of ProjectArtifacts objects. /// - secondarySourcesOverride: An array of ProjectSource objects. /// - secondarySourcesVersionOverride: An array of ProjectSourceVersion objects that specify one or more versions of the project's secondary sources to be used for this build only. diff --git a/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift b/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift index 569614613e..0950510531 100644 --- a/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift +++ b/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift @@ -722,12 +722,15 @@ extension CodeBuild { public struct BatchRestrictions: AWSEncodableShape & AWSDecodableShape { /// An array of strings that specify the compute types that are allowed for the batch build. See Build environment compute types in the CodeBuild User Guide for these values. public let computeTypesAllowed: [String]? + /// An array of strings that specify the fleets that are allowed for the batch build. See Run builds on reserved capacity fleets in the CodeBuild User Guide for more information. + public let fleetsAllowed: [String]? /// Specifies the maximum number of builds allowed. public let maximumBuildsAllowed: Int? @inlinable - public init(computeTypesAllowed: [String]? = nil, maximumBuildsAllowed: Int? = nil) { + public init(computeTypesAllowed: [String]? = nil, fleetsAllowed: [String]? = nil, maximumBuildsAllowed: Int? = nil) { self.computeTypesAllowed = computeTypesAllowed + self.fleetsAllowed = fleetsAllowed self.maximumBuildsAllowed = maximumBuildsAllowed } @@ -735,10 +738,14 @@ extension CodeBuild { try self.computeTypesAllowed?.forEach { try validate($0, name: "computeTypesAllowed[]", parent: name, min: 1) } + try self.fleetsAllowed?.forEach { + try validate($0, name: "fleetsAllowed[]", parent: name, min: 1) + } } private enum CodingKeys: String, CodingKey { case computeTypesAllowed = "computeTypesAllowed" + case fleetsAllowed = "fleetsAllowed" case maximumBuildsAllowed = "maximumBuildsAllowed" } } @@ -3444,7 +3451,7 @@ extension CodeBuild { public let insecureSsl: Bool? /// Information about the location of the source code to be built. Valid values include: For source code settings that are specified in the source action of a pipeline in CodePipeline, location should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value. For source code in a CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit..amazonaws.com/v1/repos/). For source code in an Amazon S3 input bucket, one of the following.
The path to the ZIP file that contains the source code (for example, //.zip). The path to the folder that contains the source code (for example, ///). For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your GitHub account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH. For source code in a GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your GitLab account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitLab, on the Connections Authorize application page, choose Authorize. Then on the CodeConnections Create GitLab connection page, choose Connect to GitLab. (After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to override the default connection and use this connection instead, set the auth object's type value to CODECONNECTIONS in the source object. For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your Bitbucket account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH. If you specify CODEPIPELINE for the Type property, don't specify this property. For all of the other types, you must specify Location. public let location: String? - /// Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown. To be able to report the build status to the source provider, the user associated with the source provider must + /// Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown. To be able to report the build status to the source provider, the user associated with the source provider must /// have write access to the repo. If the user does not have write access, the build status cannot be updated.
For more information, see Source provider access in the CodeBuild User Guide. The status of a build triggered by a webhook is always reported to your source provider. If your project's builds are triggered by a webhook, you must push a new commit to the repo for a change to this property to take effect. public let reportBuildStatus: Bool? /// An identifier for this project source. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length. @@ -4258,7 +4265,7 @@ extension CodeBuild { public let queuedTimeoutInMinutesOverride: Int? /// The credentials for access to a private registry. public let registryCredentialOverride: RegistryCredential? - /// Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is thrown. To be able to report the build status to the source provider, the user associated with the source provider must + /// Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket, an invalidInputException is thrown. To be able to report the build status to the source provider, the user associated with the source provider must /// have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide. The status of a build triggered by a webhook is always reported to your source provider. public let reportBuildStatusOverride: Bool? /// An array of ProjectArtifacts objects. 
diff --git a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift index adfee537cd..2fd84485f1 100644 --- a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift +++ b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift @@ -81,6 +81,45 @@ public struct CognitoIdentityProvider: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "cognito-idp.af-south-1.amazonaws.com", + "ap-east-1": "cognito-idp.ap-east-1.amazonaws.com", + "ap-northeast-1": "cognito-idp.ap-northeast-1.amazonaws.com", + "ap-northeast-2": "cognito-idp.ap-northeast-2.amazonaws.com", + "ap-northeast-3": "cognito-idp.ap-northeast-3.amazonaws.com", + "ap-south-1": "cognito-idp.ap-south-1.amazonaws.com", + "ap-south-2": "cognito-idp.ap-south-2.amazonaws.com", + "ap-southeast-1": "cognito-idp.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "cognito-idp.ap-southeast-2.amazonaws.com", + "ap-southeast-3": "cognito-idp.ap-southeast-3.amazonaws.com", + "ap-southeast-4": "cognito-idp.ap-southeast-4.amazonaws.com", + "ca-central-1": "cognito-idp.ca-central-1.amazonaws.com", + "ca-west-1": "cognito-idp.ca-west-1.amazonaws.com", + "eu-central-1": "cognito-idp.eu-central-1.amazonaws.com", + "eu-central-2": "cognito-idp.eu-central-2.amazonaws.com", + "eu-north-1": "cognito-idp.eu-north-1.amazonaws.com", + "eu-south-1": "cognito-idp.eu-south-1.amazonaws.com", + "eu-south-2": "cognito-idp.eu-south-2.amazonaws.com", + "eu-west-1": "cognito-idp.eu-west-1.amazonaws.com", + "eu-west-2": "cognito-idp.eu-west-2.amazonaws.com", + "eu-west-3": "cognito-idp.eu-west-3.amazonaws.com", + "il-central-1": "cognito-idp.il-central-1.amazonaws.com", + "me-central-1": "cognito-idp.me-central-1.amazonaws.com", + "me-south-1": "cognito-idp.me-south-1.amazonaws.com", + "sa-east-1": "cognito-idp.sa-east-1.amazonaws.com", + "us-east-1": "cognito-idp.us-east-1.amazonaws.com", + "us-east-2": "cognito-idp.us-east-2.amazonaws.com", + "us-gov-west-1": "cognito-idp.us-gov-west-1.amazonaws.com", + "us-west-1": "cognito-idp.us-west-1.amazonaws.com", + "us-west-2": "cognito-idp.us-west-2.amazonaws.com" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "us-east-1": "cognito-idp-fips.us-east-1.amazonaws.com", + "us-east-2": "cognito-idp-fips.us-east-2.amazonaws.com", + "us-gov-west-1": "cognito-idp-fips.us-gov-west-1.amazonaws.com", + "us-west-1": "cognito-idp-fips.us-west-1.amazonaws.com", + "us-west-2": "cognito-idp-fips.us-west-2.amazonaws.com" + ]), [.fips]: .init(endpoints: [ "us-east-1": "cognito-idp-fips.us-east-1.amazonaws.com", "us-east-2": "cognito-idp-fips.us-east-2.amazonaws.com", diff --git a/Sources/Soto/Services/Comprehend/Comprehend_api.swift b/Sources/Soto/Services/Comprehend/Comprehend_api.swift index 2772f205cc..f2b9e24b24 100644 --- a/Sources/Soto/Services/Comprehend/Comprehend_api.swift +++ b/Sources/Soto/Services/Comprehend/Comprehend_api.swift @@ -84,6 +84,7 @@ public struct Comprehend: AWSService { "us-east-1": "comprehend-fips.us-east-1.amazonaws.com", "us-east-2": "comprehend-fips.us-east-2.amazonaws.com", "us-gov-west-1": "comprehend-fips.us-gov-west-1.amazonaws.com", + "us-iso-east-1": "comprehend-fips.us-iso-east-1.c2s.ic.gov", "us-west-2": "comprehend-fips.us-west-2.amazonaws.com" ]) ]} diff --git 
a/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift b/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift index 807d6f5efd..792d58ff4d 100644 --- a/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift +++ b/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift @@ -26,6 +26,18 @@ import Foundation extension ComputeOptimizer { // MARK: Enums + public enum AllocationStrategy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case lowestPrice = "LowestPrice" + case prioritized = "Prioritized" + public var description: String { return self.rawValue } + } + + public enum AsgType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case mixedInstanceType = "MixedInstanceTypes" + case singleInstanceType = "SingleInstanceType" + public var description: String { return self.rawValue } + } + public enum AutoScalingConfiguration: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case targetTrackingScalingCpu = "TargetTrackingScalingCpu" case targetTrackingScalingMemory = "TargetTrackingScalingMemory" @@ -166,10 +178,13 @@ extension ComputeOptimizer { case accountId = "AccountId" case autoScalingGroupArn = "AutoScalingGroupArn" case autoScalingGroupName = "AutoScalingGroupName" + case currentConfigurationAllocationStrategy = "CurrentConfigurationAllocationStrategy" case currentConfigurationDesiredCapacity = "CurrentConfigurationDesiredCapacity" case currentConfigurationInstanceType = "CurrentConfigurationInstanceType" case currentConfigurationMaxSize = "CurrentConfigurationMaxSize" case currentConfigurationMinSize = "CurrentConfigurationMinSize" + case currentConfigurationMixedInstanceTypes = "CurrentConfigurationMixedInstanceTypes" + case currentConfigurationType = "CurrentConfigurationType" case currentInstanceGpuInfo = "CurrentInstanceGpuInfo" case currentMemory = "CurrentMemory" case currentNetwork = "CurrentNetwork" @@ -189,10 +204,14 @@ extension ComputeOptimizer { case inferredWorkloadTypes = "InferredWorkloadTypes" case lastRefreshTimestamp = "LastRefreshTimestamp" case lookbackPeriodInDays = "LookbackPeriodInDays" + case recommendationOptionsConfigurationAllocationStrategy = "RecommendationOptionsConfigurationAllocationStrategy" case recommendationOptionsConfigurationDesiredCapacity = "RecommendationOptionsConfigurationDesiredCapacity" + case recommendationOptionsConfigurationEstimatedInstanceHourReductionPercentage = "RecommendationOptionsConfigurationEstimatedInstanceHourReductionPercentage" case recommendationOptionsConfigurationInstanceType = "RecommendationOptionsConfigurationInstanceType" case recommendationOptionsConfigurationMaxSize = "RecommendationOptionsConfigurationMaxSize" case recommendationOptionsConfigurationMinSize = "RecommendationOptionsConfigurationMinSize" + case recommendationOptionsConfigurationMixedInstanceTypes = "RecommendationOptionsConfigurationMixedInstanceTypes" + case recommendationOptionsConfigurationType = "RecommendationOptionsConfigurationType" case recommendationOptionsEstimatedMonthlySavingsCurrency = "RecommendationOptionsEstimatedMonthlySavingsCurrency" case recommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts = "RecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts" case recommendationOptionsEstimatedMonthlySavingsValue = "RecommendationOptionsEstimatedMonthlySavingsValue" @@ -1044,28 +1063,44 @@ extension ComputeOptimizer { } public struct AutoScalingGroupConfiguration: 
AWSDecodableShape { - /// The desired capacity, or number of instances, for the Auto Scaling group. + /// Describes the allocation strategy that the EC2 Auto Scaling group uses. This field is only available for EC2 Auto Scaling groups with mixed instance types. + public let allocationStrategy: AllocationStrategy? + /// The desired capacity, or number of instances, for the EC2 Auto Scaling group. public let desiredCapacity: Int? - /// The instance type for the Auto Scaling group. + /// Describes the projected percentage reduction in instance hours after adopting the recommended configuration. This field is only available for EC2 Auto Scaling groups with scaling policies. + public let estimatedInstanceHourReductionPercentage: Double? + /// The instance type for the EC2 Auto Scaling group. public let instanceType: String? - /// The maximum size, or maximum number of instances, for the Auto Scaling group. + /// The maximum size, or maximum number of instances, for the EC2 Auto Scaling group. public let maxSize: Int? - /// The minimum size, or minimum number of instances, for the Auto Scaling group. + /// The minimum size, or minimum number of instances, for the EC2 Auto Scaling group. public let minSize: Int? + /// Lists the instance types within an EC2 Auto Scaling group that has mixed instance types. + public let mixedInstanceTypes: [String]? + /// Describes whether the EC2 Auto Scaling group has a single instance type or a mixed instance type configuration. + public let type: AsgType? @inlinable - public init(desiredCapacity: Int? = nil, instanceType: String? = nil, maxSize: Int? = nil, minSize: Int? = nil) { + public init(allocationStrategy: AllocationStrategy? = nil, desiredCapacity: Int? = nil, estimatedInstanceHourReductionPercentage: Double? = nil, instanceType: String? = nil, maxSize: Int? = nil, minSize: Int? = nil, mixedInstanceTypes: [String]? = nil, type: AsgType? = nil) { + self.allocationStrategy = allocationStrategy self.desiredCapacity = desiredCapacity + self.estimatedInstanceHourReductionPercentage = estimatedInstanceHourReductionPercentage self.instanceType = instanceType self.maxSize = maxSize self.minSize = minSize + self.mixedInstanceTypes = mixedInstanceTypes + self.type = type } private enum CodingKeys: String, CodingKey { + case allocationStrategy = "allocationStrategy" case desiredCapacity = "desiredCapacity" + case estimatedInstanceHourReductionPercentage = "estimatedInstanceHourReductionPercentage" case instanceType = "instanceType" case maxSize = "maxSize" case minSize = "minSize" + case mixedInstanceTypes = "mixedInstanceTypes" + case type = "type" } } diff --git a/Sources/Soto/Services/Connect/Connect_api.swift b/Sources/Soto/Services/Connect/Connect_api.swift index c38fd127a8..dc2bf76074 100644 --- a/Sources/Soto/Services/Connect/Connect_api.swift +++ b/Sources/Soto/Services/Connect/Connect_api.swift @@ -80,6 +80,7 @@ public struct Connect: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.fips]: .init(endpoints: [ + "ca-central-1": "connect-fips.ca-central-1.amazonaws.com", "us-east-1": "connect-fips.us-east-1.amazonaws.com", "us-gov-west-1": "connect.us-gov-west-1.amazonaws.com", "us-west-2": "connect-fips.us-west-2.amazonaws.com" @@ -569,7 +570,7 @@ public struct Connect: AWSService { return try await self.associateTrafficDistributionGroupUser(input, logger: logger) } - /// >Associates a set of proficiencies with a user. + /// Associates a set of proficiencies with a user.
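Returning to the Compute Optimizer additions just above: the new mixed-instance fields on AutoScalingGroupConfiguration are decode-only, so they surface when reading recommendations rather than when making requests. A hedged sketch, assuming Soto's generated getAutoScalingGroupRecommendations API and its standard response shape:

```swift
import SotoComputeOptimizer

// Sketch: inspect the new ASG type, allocation strategy, and mixed
// instance type fields on each recommendation's current configuration.
func printAsgRecommendations(optimizer: ComputeOptimizer) async throws {
    let response = try await optimizer.getAutoScalingGroupRecommendations(.init())
    for recommendation in response.autoScalingGroupRecommendations ?? [] {
        guard let current = recommendation.currentConfiguration else { continue }
        if current.type == .mixedInstanceType {
            print("Mixed types: \(current.mixedInstanceTypes ?? []), " +
                  "strategy: \(current.allocationStrategy?.rawValue ?? "n/a")")
        } else {
            print("Single type: \(current.instanceType ?? "unknown")")
        }
    }
}
```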
@Sendable @inlinable public func associateUserProficiencies(_ input: AssociateUserProficienciesRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -582,7 +583,7 @@ public struct Connect: AWSService { logger: logger ) } - /// >Associates a set of proficiencies with a user. + /// Associates a set of proficiencies with a user. /// /// Parameters: /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. @@ -1058,7 +1059,7 @@ public struct Connect: AWSService { return try await self.createContactFlowModule(input, logger: logger) } - /// Publishes a new version of the flow provided. Versions are immutable and monotonically increasing. If a version of the same flow content already exists, no new version is created and instead the existing version number is returned. If the FlowContentSha256 provided is different from the FlowContentSha256 of the $LATEST published flow content, then an error is returned. This API only supports creating versions for flows of type Campaign. + /// Publishes a new version of the flow provided. Versions are immutable and monotonically increasing. If the FlowContentSha256 provided is different from the FlowContentSha256 of the $LATEST published flow content, then an error is returned. This API only supports creating versions for flows of type Campaign. @Sendable @inlinable public func createContactFlowVersion(_ input: CreateContactFlowVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateContactFlowVersionResponse { @@ -1071,10 +1072,11 @@ public struct Connect: AWSService { logger: logger ) } - /// Publishes a new version of the flow provided. Versions are immutable and monotonically increasing. If a version of the same flow content already exists, no new version is created and instead the existing version number is returned. If the FlowContentSha256 provided is different from the FlowContentSha256 of the $LATEST published flow content, then an error is returned. This API only supports creating versions for flows of type Campaign. + /// Publishes a new version of the flow provided. Versions are immutable and monotonically increasing. If the FlowContentSha256 provided is different from the FlowContentSha256 of the $LATEST published flow content, then an error is returned. This API only supports creating versions for flows of type Campaign. /// /// Parameters: /// - contactFlowId: The identifier of the flow. + /// - contactFlowVersion: The identifier of the flow version. /// - description: The description of the flow version. /// - flowContentSha256: Indicates the checksum value of the flow content. /// - instanceId: The identifier of the Amazon Connect instance. @@ -1084,6 +1086,7 @@ public struct Connect: AWSService { @inlinable public func createContactFlowVersion( contactFlowId: String, + contactFlowVersion: Int64? = nil, description: String? = nil, flowContentSha256: String? = nil, instanceId: String, @@ -1093,6 +1096,7 @@ public struct Connect: AWSService { ) async throws -> CreateContactFlowVersionResponse { let input = CreateContactFlowVersionRequest( contactFlowId: contactFlowId, + contactFlowVersion: contactFlowVersion, description: description, flowContentSha256: flowContentSha256, instanceId: instanceId, @@ -2353,6 +2357,41 @@ public struct Connect: AWSService { return try await self.deleteContactFlowModule(input, logger: logger) } + /// Deletes the flow version specified by the flow version identifier.
+ @Sendable + @inlinable + public func deleteContactFlowVersion(_ input: DeleteContactFlowVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteContactFlowVersionResponse { + try await self.client.execute( + operation: "DeleteContactFlowVersion", + path: "/contact-flows/{InstanceId}/{ContactFlowId}/version/{ContactFlowVersion}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes the flow version specified by the flow version identifier. + /// + /// Parameters: + /// - contactFlowId: The identifier of the flow. + /// - contactFlowVersion: The identifier of the flow version. + /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + /// - logger: Logger use during operation + @inlinable + public func deleteContactFlowVersion( + contactFlowId: String, + contactFlowVersion: Int64, + instanceId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteContactFlowVersionResponse { + let input = DeleteContactFlowVersionRequest( + contactFlowId: contactFlowId, + contactFlowVersion: contactFlowVersion, + instanceId: instanceId + ) + return try await self.deleteContactFlowVersion(input, logger: logger) + } + /// Deletes an email address from the specified Amazon Connect instance. @Sendable @inlinable @@ -2651,7 +2690,7 @@ public struct Connect: AWSService { return try await self.deletePushNotificationRegistration(input, logger: logger) } - /// Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin website. @Sendable @inlinable public func deleteQueue(_ input: DeleteQueueRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -2664,7 +2703,7 @@ public struct Connect: AWSService { logger: logger ) } - /// Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin website. + /// Deletes a queue. /// /// Parameters: /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. @@ -3200,7 +3239,7 @@ public struct Connect: AWSService { return try await self.describeContactEvaluation(input, logger: logger) } - /// Describes the specified flow. You can also create and update flows using the Amazon Connect Flow language. Use the $SAVED alias in the request to describe the SAVED content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. After a flow is published, $SAVED needs to be supplied to view saved content that has not been published. In the response, Status indicates the flow status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content. SAVED does not initiate validation of the content. SAVED | PUBLISHED + /// Describes the specified flow. You can also create and update flows using the Amazon Connect Flow language. Use the $SAVED alias in the request to describe the SAVED content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. After a flow is published, $SAVED needs to be supplied to view saved content that has not been published. Use arn:aws:.../contact-flow/{id}:{version} to retrieve the content of a specific flow version. In the response, Status indicates the flow status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content. SAVED does not initiate validation of the content.
SAVED | PUBLISHED @Sendable @inlinable public func describeContactFlow(_ input: DescribeContactFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeContactFlowResponse { @@ -3213,7 +3252,7 @@ public struct Connect: AWSService { logger: logger ) } - /// Describes the specified flow. You can also create and update flows using the Amazon Connect Flow language. Use the $SAVED alias in the request to describe the SAVED content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. After a flow is published, $SAVED needs to be supplied to view saved content that has not been published. In the response, Status indicates the flow status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content. SAVED does not initiate validation of the content. SAVED | PUBLISHED + /// Describes the specified flow. You can also create and update flows using the Amazon Connect Flow language. Use the $SAVED alias in the request to describe the SAVED content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. After a flow is published, $SAVED needs to be supplied to view saved content that has not been published. Use arn:aws:.../contact-flow/{id}:{version} to retrieve the content of a specific flow version. In the response, Status indicates the flow status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content. SAVED does not initiate validation of the content. SAVED | PUBLISHED /// /// Parameters: /// - contactFlowId: The identifier of the flow. @@ -5094,7 +5133,7 @@ public struct Connect: AWSService { return try await self.listAuthenticationProfiles(input, logger: logger) } - /// This API is in preview release for Amazon Connect and is subject to change. For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to returns both Amazon Lex V1 and V2 bots. + /// This API is in preview release for Amazon Connect and is subject to change. For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to return both Amazon Lex V1 and V2 bots. @Sendable @inlinable public func listBots(_ input: ListBotsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListBotsResponse { @@ -5107,7 +5146,7 @@ public struct Connect: AWSService { logger: logger ) } - /// This API is in preview release for Amazon Connect and is subject to change. For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to returns both Amazon Lex V1 and V2 bots. + /// This API is in preview release for Amazon Connect and is subject to change. For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to return both Amazon Lex V1 and V2 bots. /// /// Parameters: /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. @@ -7860,6 +7899,7 @@ public struct Connect: AWSService { /// - chatDurationInMinutes: The total duration of the newly started chat session. If not specified, the chat session duration defaults to 25 hours. The minimum configurable time is 60 minutes. The maximum configurable time is 10,080 minutes (7 days).
/// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. /// - contactFlowId: The identifier of the flow for initiating the chat. To see the ContactFlowId in the Amazon Connect admin website, on the navigation menu go to Routing, Flows. Choose the flow. On the flow page, under the name of the flow, choose Show additional flow information. The ContactFlowId is the last part of the ARN, shown here in bold: arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx + /// - customerId: The customer's identification number. For example, the CustomerId may be a customer number from your CRM. /// - initialMessage: The initial message to be sent to the newly created chat. If you have a Lex bot in your flow, the initial message is not delivered to the Lex bot. /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. /// - participantDetails: Information identifying the participant. @@ -7874,6 +7914,7 @@ public struct Connect: AWSService { chatDurationInMinutes: Int? = nil, clientToken: String? = StartChatContactRequest.idempotencyToken(), contactFlowId: String, + customerId: String? = nil, initialMessage: ChatMessage? = nil, instanceId: String, participantDetails: ParticipantDetails, @@ -7888,6 +7929,7 @@ public struct Connect: AWSService { chatDurationInMinutes: chatDurationInMinutes, clientToken: clientToken, contactFlowId: contactFlowId, + customerId: customerId, initialMessage: initialMessage, instanceId: instanceId, participantDetails: participantDetails, @@ -9525,6 +9567,47 @@ public struct Connect: AWSService { return try await self.updateInstanceStorageConfig(input, logger: logger) } + /// Instructs Amazon Connect to resume the authentication process. The subsequent actions depend on the request body contents: If a code is provided: Connect retrieves the identity information from Amazon Cognito and imports it into Connect Customer Profiles. If an error is provided: The error branch of the Authenticate Customer block is executed. The API returns a success response to acknowledge the request. However, the interaction and exchange of identity information occur asynchronously after the response is returned. + @Sendable + @inlinable + public func updateParticipantAuthentication(_ input: UpdateParticipantAuthenticationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateParticipantAuthenticationResponse { + try await self.client.execute( + operation: "UpdateParticipantAuthentication", + path: "/contact/update-participant-authentication", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Instructs Amazon Connect to resume the authentication process. The subsequent actions depend on the request body contents: If a code is provided: Connect retrieves the identity information from Amazon Cognito and imports it into Connect Customer Profiles. If an error is provided: The error branch of the Authenticate Customer block is executed. The API returns a success response to acknowledge the request. However, the interaction and exchange of identity information occur asynchronously after the response is returned. 
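A hedged sketch of driving the new UpdateParticipantAuthentication operation documented above (not from the diff; the instance ID is a placeholder):

import SotoConnect

// Sketch: forward the code and state that Cognito appended to the redirectUri
// back to Amazon Connect; the identity exchange itself completes asynchronously
// after the call returns.
func resumeAuthentication(connect: Connect, code: String, state: String) async throws {
    _ = try await connect.updateParticipantAuthentication(
        code: code,        // pass error/errorDescription instead to take the error branch
        instanceId: "22222222-2222-2222-2222-222222222222",   // placeholder instance ID
        state: state       // must match the state from the GetAuthenticationUrl response
    )
}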
+ /// + /// Parameters: + /// - code: The code query parameter provided by Cognito in the redirectUri. + /// - error: The error query parameter provided by Cognito in the redirectUri. + /// - errorDescription: The error_description parameter provided by Cognito in the redirectUri. + /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + /// - state: The state query parameter that was provided by Cognito in the redirectUri. This will also match the state parameter provided in the AuthenticationUrl from the GetAuthenticationUrl response. + /// - logger: Logger use during operation + @inlinable + public func updateParticipantAuthentication( + code: String? = nil, + error: String? = nil, + errorDescription: String? = nil, + instanceId: String, + state: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateParticipantAuthenticationResponse { + let input = UpdateParticipantAuthenticationRequest( + code: code, + error: error, + errorDescription: errorDescription, + instanceId: instanceId, + state: state + ) + return try await self.updateParticipantAuthentication(input, logger: logger) + } + /// Updates timeouts for when human chat participants are to be considered idle, and when agents are automatically disconnected from a chat due to idleness. You can set four timers: Customer idle timeout Customer auto-disconnect timeout Agent idle timeout Agent auto-disconnect timeout For more information about how chat timeouts work, see Set up chat timeouts for human participants. @Sendable @inlinable diff --git a/Sources/Soto/Services/Connect/Connect_shapes.swift b/Sources/Soto/Services/Connect/Connect_shapes.swift index 7067d5be71..e62e603119 100644 --- a/Sources/Soto/Services/Connect/Connect_shapes.swift +++ b/Sources/Soto/Services/Connect/Connect_shapes.swift @@ -136,6 +136,7 @@ extension Connect { case agentHold = "AGENT_HOLD" case agentTransfer = "AGENT_TRANSFER" case agentWhisper = "AGENT_WHISPER" + case campaign = "CAMPAIGN" case contactFlow = "CONTACT_FLOW" case customerHold = "CUSTOMER_HOLD" case customerQueue = "CUSTOMER_QUEUE" @@ -406,6 +407,7 @@ extension Connect { case enhancedContactMonitoring = "ENHANCED_CONTACT_MONITORING" case highVolumeOutbound = "HIGH_VOLUME_OUTBOUND" case inboundCalls = "INBOUND_CALLS" + case multiPartyChatConference = "MULTI_PARTY_CHAT_CONFERENCE" case multiPartyConference = "MULTI_PARTY_CONFERENCE" case outboundCalls = "OUTBOUND_CALLS" case useCustomTtsVoices = "USE_CUSTOM_TTS_VOICES" @@ -451,6 +453,7 @@ extension Connect { case application = "APPLICATION" case callTransferConnector = "CALL_TRANSFER_CONNECTOR" case casesDomain = "CASES_DOMAIN" + case cognitoUserPool = "COGNITO_USER_POOL" case event = "EVENT" case fileScanner = "FILE_SCANNER" case pinpointApp = "PINPOINT_APP" @@ -2763,15 +2766,18 @@ extension Connect { public let name: String? /// The proficiency level of the condition. public let proficiencyLevel: Float? + /// An object to define the minimum and maximum proficiency levels. + public let range: Range? /// The value of the predefined attribute. public let value: String? @inlinable - public init(comparisonOperator: String? = nil, matchCriteria: MatchCriteria? = nil, name: String? = nil, proficiencyLevel: Float? = nil, value: String? = nil) { + public init(comparisonOperator: String? = nil, matchCriteria: MatchCriteria? = nil, name: String? = nil, proficiencyLevel: Float? = nil, range: Range? = nil, value: String?
= nil) { self.comparisonOperator = comparisonOperator self.matchCriteria = matchCriteria self.name = name self.proficiencyLevel = proficiencyLevel + self.range = range self.value = value } @@ -2783,6 +2789,7 @@ extension Connect { try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.proficiencyLevel, name: "proficiencyLevel", parent: name, max: 5.0) try self.validate(self.proficiencyLevel, name: "proficiencyLevel", parent: name, min: 1.0) + try self.range?.validate(name: "\(name).range") try self.validate(self.value, name: "value", parent: name, max: 128) try self.validate(self.value, name: "value", parent: name, min: 1) } @@ -2792,6 +2799,7 @@ extension Connect { case matchCriteria = "MatchCriteria" case name = "Name" case proficiencyLevel = "ProficiencyLevel" + case range = "Range" case value = "Value" } } @@ -3563,6 +3571,8 @@ extension Connect { public let customer: Customer? /// The customer or external third party participant endpoint. public let customerEndpoint: EndpointInfo? + /// The customer's identification number. For example, the CustomerId may be a customer number from your CRM. You can create a Lambda function to pull the unique customer ID of the caller from your CRM system. If you enable Amazon Connect Voice ID capability, this attribute is populated with the CustomerSpeakerId of the caller. + public let customerId: String? /// Information about customer’s voice activity. public let customerVoiceActivity: CustomerVoiceActivity? /// The description of the contact. @@ -3617,7 +3627,7 @@ extension Connect { public let wisdomInfo: WisdomInfo? @inlinable - public init(additionalEmailRecipients: AdditionalEmailRecipients? = nil, agentInfo: AgentInfo? = nil, answeringMachineDetectionStatus: AnsweringMachineDetectionStatus? = nil, arn: String? = nil, campaign: Campaign? = nil, channel: Channel? = nil, connectedToSystemTimestamp: Date? = nil, contactAssociationId: String? = nil, customer: Customer? = nil, customerEndpoint: EndpointInfo? = nil, customerVoiceActivity: CustomerVoiceActivity? = nil, description: String? = nil, disconnectDetails: DisconnectDetails? = nil, disconnectTimestamp: Date? = nil, id: String? = nil, initialContactId: String? = nil, initiationMethod: ContactInitiationMethod? = nil, initiationTimestamp: Date? = nil, lastPausedTimestamp: Date? = nil, lastResumedTimestamp: Date? = nil, lastUpdateTimestamp: Date? = nil, name: String? = nil, previousContactId: String? = nil, qualityMetrics: QualityMetrics? = nil, queueInfo: QueueInfo? = nil, queuePriority: Int64? = nil, queueTimeAdjustmentSeconds: Int? = nil, relatedContactId: String? = nil, routingCriteria: RoutingCriteria? = nil, scheduledTimestamp: Date? = nil, segmentAttributes: [String: SegmentAttributeValue]? = nil, systemEndpoint: EndpointInfo? = nil, tags: [String: String]? = nil, totalPauseCount: Int? = nil, totalPauseDurationInSeconds: Int? = nil, wisdomInfo: WisdomInfo? = nil) { + public init(additionalEmailRecipients: AdditionalEmailRecipients? = nil, agentInfo: AgentInfo? = nil, answeringMachineDetectionStatus: AnsweringMachineDetectionStatus? = nil, arn: String? = nil, campaign: Campaign? = nil, channel: Channel? = nil, connectedToSystemTimestamp: Date? = nil, contactAssociationId: String? = nil, customer: Customer? = nil, customerEndpoint: EndpointInfo? = nil, customerId: String? = nil, customerVoiceActivity: CustomerVoiceActivity? = nil, description: String? = nil, disconnectDetails: DisconnectDetails? = nil, disconnectTimestamp: Date? = nil, id: String? 
= nil, initialContactId: String? = nil, initiationMethod: ContactInitiationMethod? = nil, initiationTimestamp: Date? = nil, lastPausedTimestamp: Date? = nil, lastResumedTimestamp: Date? = nil, lastUpdateTimestamp: Date? = nil, name: String? = nil, previousContactId: String? = nil, qualityMetrics: QualityMetrics? = nil, queueInfo: QueueInfo? = nil, queuePriority: Int64? = nil, queueTimeAdjustmentSeconds: Int? = nil, relatedContactId: String? = nil, routingCriteria: RoutingCriteria? = nil, scheduledTimestamp: Date? = nil, segmentAttributes: [String: SegmentAttributeValue]? = nil, systemEndpoint: EndpointInfo? = nil, tags: [String: String]? = nil, totalPauseCount: Int? = nil, totalPauseDurationInSeconds: Int? = nil, wisdomInfo: WisdomInfo? = nil) { self.additionalEmailRecipients = additionalEmailRecipients self.agentInfo = agentInfo self.answeringMachineDetectionStatus = answeringMachineDetectionStatus @@ -3628,6 +3638,7 @@ extension Connect { self.contactAssociationId = contactAssociationId self.customer = customer self.customerEndpoint = customerEndpoint + self.customerId = customerId self.customerVoiceActivity = customerVoiceActivity self.description = description self.disconnectDetails = disconnectDetails @@ -3667,6 +3678,7 @@ extension Connect { case contactAssociationId = "ContactAssociationId" case customer = "Customer" case customerEndpoint = "CustomerEndpoint" + case customerId = "CustomerId" case customerVoiceActivity = "CustomerVoiceActivity" case description = "Description" case disconnectDetails = "DisconnectDetails" @@ -4516,7 +4528,7 @@ extension Connect { public let contactFlowArn: String? /// The identifier of the flow. public let contactFlowId: String? - /// Indicates the checksum value of the flow content. + /// Indicates the checksum value of the latest published flow content. public let flowContentSha256: String? @inlinable @@ -4536,6 +4548,8 @@ extension Connect { public struct CreateContactFlowVersionRequest: AWSEncodableShape { /// The identifier of the flow. public let contactFlowId: String + /// The identifier of the flow version. + public let contactFlowVersion: Int64? /// The description of the flow version. public let description: String? /// Indicates the checksum value of the flow content. @@ -4548,8 +4562,9 @@ extension Connect { public let lastModifiedTime: Date? @inlinable - public init(contactFlowId: String, description: String? = nil, flowContentSha256: String? = nil, instanceId: String, lastModifiedRegion: String? = nil, lastModifiedTime: Date? = nil) { + public init(contactFlowId: String, contactFlowVersion: Int64? = nil, description: String? = nil, flowContentSha256: String? = nil, instanceId: String, lastModifiedRegion: String? = nil, lastModifiedTime: Date? = nil) { self.contactFlowId = contactFlowId + self.contactFlowVersion = contactFlowVersion self.description = description self.flowContentSha256 = flowContentSha256 self.instanceId = instanceId @@ -4561,6 +4576,7 @@ extension Connect { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.contactFlowId, key: "ContactFlowId") + try container.encodeIfPresent(self.contactFlowVersion, forKey: .contactFlowVersion) try container.encodeIfPresent(self.description, forKey: .description) try container.encodeIfPresent(self.flowContentSha256, forKey: .flowContentSha256) request.encodePath(self.instanceId, key: "InstanceId") @@ -4569,6 +4585,7 @@ extension Connect { } public func validate(name: String) throws { + try self.validate(self.contactFlowVersion, name: "contactFlowVersion", parent: name, min: 1) try self.validate(self.flowContentSha256, name: "flowContentSha256", parent: name, max: 64) try self.validate(self.flowContentSha256, name: "flowContentSha256", parent: name, min: 1) try self.validate(self.flowContentSha256, name: "flowContentSha256", parent: name, pattern: "^[a-zA-Z0-9]{64}$") @@ -4578,6 +4595,7 @@ extension Connect { } private enum CodingKeys: String, CodingKey { + case contactFlowVersion = "ContactFlowVersion" case description = "Description" case flowContentSha256 = "FlowContentSha256" case lastModifiedRegion = "LastModifiedRegion" @@ -7012,6 +7030,42 @@ extension Connect { public init() {} } + public struct DeleteContactFlowVersionRequest: AWSEncodableShape { + /// The identifier of the flow. + public let contactFlowId: String + /// The identifier of the flow version. + public let contactFlowVersion: Int64 + /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + public let instanceId: String + + @inlinable + public init(contactFlowId: String, contactFlowVersion: Int64, instanceId: String) { + self.contactFlowId = contactFlowId + self.contactFlowVersion = contactFlowVersion + self.instanceId = instanceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.contactFlowId, key: "ContactFlowId") + request.encodePath(self.contactFlowVersion, key: "ContactFlowVersion") + request.encodePath(self.instanceId, key: "InstanceId") + } + + public func validate(name: String) throws { + try self.validate(self.contactFlowVersion, name: "contactFlowVersion", parent: name, min: 1) + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteContactFlowVersionResponse: AWSDecodableShape { + public init() {} + } + public struct DeleteEmailAddressRequest: AWSEncodableShape { /// The identifier of the email address. public let emailAddressId: String @@ -10440,13 +10494,16 @@ extension Connect { public let andExpression: [Expression]? /// An object to specify the predefined attribute condition. public let attributeCondition: AttributeCondition? + /// An object to specify the predefined attribute condition that is negated (NOT) when the expression is evaluated. + public let notAttributeCondition: AttributeCondition? /// List of routing expressions which will be OR-ed together. public let orExpression: [Expression]? @inlinable - public init(andExpression: [Expression]? = nil, attributeCondition: AttributeCondition? = nil, orExpression: [Expression]? = nil) { + public init(andExpression: [Expression]? = nil, attributeCondition: AttributeCondition? = nil, notAttributeCondition: AttributeCondition? = nil, orExpression: [Expression]?
= nil) { self.andExpression = andExpression self.attributeCondition = attributeCondition + self.notAttributeCondition = notAttributeCondition self.orExpression = orExpression } @@ -10455,6 +10511,7 @@ extension Connect { try $0.validate(name: "\(name).andExpression[]") } try self.attributeCondition?.validate(name: "\(name).attributeCondition") + try self.notAttributeCondition?.validate(name: "\(name).notAttributeCondition") try self.orExpression?.forEach { try $0.validate(name: "\(name).orExpression[]") } @@ -10463,6 +10520,7 @@ extension Connect { private enum CodingKeys: String, CodingKey { case andExpression = "AndExpression" case attributeCondition = "AttributeCondition" + case notAttributeCondition = "NotAttributeCondition" case orExpression = "OrExpression" } } @@ -16759,6 +16817,31 @@ extension Connect { } } + public struct Range: AWSEncodableShape & AWSDecodableShape { + /// The maximum proficiency level of the range. + public let maxProficiencyLevel: Float? + /// The minimum proficiency level of the range. + public let minProficiencyLevel: Float? + + @inlinable + public init(maxProficiencyLevel: Float? = nil, minProficiencyLevel: Float? = nil) { + self.maxProficiencyLevel = maxProficiencyLevel + self.minProficiencyLevel = minProficiencyLevel + } + + public func validate(name: String) throws { + try self.validate(self.maxProficiencyLevel, name: "maxProficiencyLevel", parent: name, max: 5.0) + try self.validate(self.maxProficiencyLevel, name: "maxProficiencyLevel", parent: name, min: 1.0) + try self.validate(self.minProficiencyLevel, name: "minProficiencyLevel", parent: name, max: 5.0) + try self.validate(self.minProficiencyLevel, name: "minProficiencyLevel", parent: name, min: 1.0) + } + + private enum CodingKeys: String, CodingKey { + case maxProficiencyLevel = "MaxProficiencyLevel" + case minProficiencyLevel = "MinProficiencyLevel" + } + } + public struct ReadOnlyFieldInfo: AWSEncodableShape & AWSDecodableShape { /// Identifier of the read-only field. public let id: TaskTemplateFieldIdentifier? @@ -19770,7 +19853,7 @@ extension Connect { public let fileId: String? /// The current status of the attached file. public let fileStatus: FileStatusType? - /// Information to be used while uploading the attached file. + /// The headers to be provided while uploading the file to the URL. public let uploadUrlMetadata: UploadUrlMetadata? @inlinable @@ -19802,6 +19885,8 @@ extension Connect { public let clientToken: String? /// The identifier of the flow for initiating the chat. To see the ContactFlowId in the Amazon Connect admin website, on the navigation menu go to Routing, Flows. Choose the flow. On the flow page, under the name of the flow, choose Show additional flow information. The ContactFlowId is the last part of the ARN, shown here in bold: arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx public let contactFlowId: String + /// The customer's identification number. For example, the CustomerId may be a customer number from your CRM. + public let customerId: String? /// The initial message to be sent to the newly created chat. If you have a Lex bot in your flow, the initial message is not delivered to the Lex bot. public let initialMessage: ChatMessage? /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. @@ -19818,11 +19903,12 @@ extension Connect { public let supportedMessagingContentTypes: [String]? 
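Pausing the shape definitions for a moment: a hedged sketch of how the new Range and NotAttributeCondition pieces defined above might compose into a routing Expression. The comparisonOperator string is an assumption for illustration only; it is not defined anywhere in this diff.

import SotoConnect

// Sketch: require Technology proficiency between 3 and 5 while excluding
// agents whose Language attribute is "DE".
let rangeCondition = Connect.AttributeCondition(
    comparisonOperator: "Range",   // assumed operator name, not taken from this diff
    name: "Technology",
    range: Connect.Range(maxProficiencyLevel: 5.0, minProficiencyLevel: 3.0)
)
let expression = Connect.Expression(
    andExpression: [
        Connect.Expression(attributeCondition: rangeCondition),
        Connect.Expression(notAttributeCondition: Connect.AttributeCondition(name: "Language", value: "DE")),
    ]
)
try expression.validate(name: "expression")   // exercises the 1.0...5.0 bounds added above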
@inlinable - public init(attributes: [String: String]? = nil, chatDurationInMinutes: Int? = nil, clientToken: String? = StartChatContactRequest.idempotencyToken(), contactFlowId: String, initialMessage: ChatMessage? = nil, instanceId: String, participantDetails: ParticipantDetails, persistentChat: PersistentChat? = nil, relatedContactId: String? = nil, segmentAttributes: [String: SegmentAttributeValue]? = nil, supportedMessagingContentTypes: [String]? = nil) { + public init(attributes: [String: String]? = nil, chatDurationInMinutes: Int? = nil, clientToken: String? = StartChatContactRequest.idempotencyToken(), contactFlowId: String, customerId: String? = nil, initialMessage: ChatMessage? = nil, instanceId: String, participantDetails: ParticipantDetails, persistentChat: PersistentChat? = nil, relatedContactId: String? = nil, segmentAttributes: [String: SegmentAttributeValue]? = nil, supportedMessagingContentTypes: [String]? = nil) { self.attributes = attributes self.chatDurationInMinutes = chatDurationInMinutes self.clientToken = clientToken self.contactFlowId = contactFlowId + self.customerId = customerId self.initialMessage = initialMessage self.instanceId = instanceId self.participantDetails = participantDetails @@ -19842,6 +19928,8 @@ extension Connect { try self.validate(self.chatDurationInMinutes, name: "chatDurationInMinutes", parent: name, min: 60) try self.validate(self.clientToken, name: "clientToken", parent: name, max: 500) try self.validate(self.contactFlowId, name: "contactFlowId", parent: name, max: 500) + try self.validate(self.customerId, name: "customerId", parent: name, max: 128) + try self.validate(self.customerId, name: "customerId", parent: name, min: 1) try self.initialMessage?.validate(name: "\(name).initialMessage") try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) @@ -19865,6 +19953,7 @@ extension Connect { case chatDurationInMinutes = "ChatDurationInMinutes" case clientToken = "ClientToken" case contactFlowId = "ContactFlowId" + case customerId = "CustomerId" case initialMessage = "InitialMessage" case instanceId = "InstanceId" case participantDetails = "ParticipantDetails" @@ -22760,6 +22849,55 @@ extension Connect { } } + public struct UpdateParticipantAuthenticationRequest: AWSEncodableShape { + /// The code query parameter provided by Cognito in the redirectUri. + public let code: String? + /// The error query parameter provided by Cognito in the redirectUri. + public let error: String? + /// The error_description parameter provided by Cognito in the redirectUri. + public let errorDescription: String? + /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + public let instanceId: String + /// The state query parameter that was provided by Cognito in the redirectUri. This will also match the state parameter provided in the AuthenticationUrl from the GetAuthenticationUrl response. + public let state: String + + @inlinable + public init(code: String? = nil, error: String? = nil, errorDescription: String? 
= nil, instanceId: String, state: String) { + self.code = code + self.error = error + self.errorDescription = errorDescription + self.instanceId = instanceId + self.state = state + } + + public func validate(name: String) throws { + try self.validate(self.code, name: "code", parent: name, max: 2048) + try self.validate(self.code, name: "code", parent: name, min: 1) + try self.validate(self.error, name: "error", parent: name, max: 2048) + try self.validate(self.error, name: "error", parent: name, min: 1) + try self.validate(self.error, name: "error", parent: name, pattern: "^[\\x20-\\x21\\x23-\\x5B\\x5D-\\x7E]*$") + try self.validate(self.errorDescription, name: "errorDescription", parent: name, max: 2048) + try self.validate(self.errorDescription, name: "errorDescription", parent: name, min: 1) + try self.validate(self.errorDescription, name: "errorDescription", parent: name, pattern: "^[\\x20-\\x21\\x23-\\x5B\\x5D-\\x7E]*$") + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.validate(self.state, name: "state", parent: name, max: 1000) + try self.validate(self.state, name: "state", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case code = "Code" + case error = "Error" + case errorDescription = "ErrorDescription" + case instanceId = "InstanceId" + case state = "State" + } + } + + public struct UpdateParticipantAuthenticationResponse: AWSDecodableShape { + public init() {} + } + public struct UpdateParticipantRoleConfigRequest: AWSEncodableShape { /// The Amazon Connect channel you want to configure. public let channelConfiguration: UpdateParticipantRoleConfigChannelInfo @@ -25020,7 +25158,7 @@ extension Connect { } public struct VoiceRecordingConfiguration: AWSEncodableShape { - /// Identifies which IVR track is being recorded. + /// Identifies which IVR track is being recorded. One and only one of the track configurations should be present in the request. public let ivrRecordingTrack: IvrRecordingTrack? /// Identifies which track is being recorded. public let voiceRecordingTrack: VoiceRecordingTrack? diff --git a/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_api.swift b/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_api.swift index e230ec3a8f..0cf20d8289 100644 --- a/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_api.swift +++ b/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS ConnectParticipant service. /// -/// Amazon Connect is an easy-to-use omnichannel cloud contact center service that enables companies of any size to deliver superior customer service at a lower cost. Amazon Connect communications capabilities make it easy for companies to deliver personalized interactions across communication channels, including chat. Use the Amazon Connect Participant Service to manage participants (for example, agents, customers, and managers listening in), and to send messages and events within a chat contact. The APIs in the service enable the following: sending chat messages, attachment sharing, managing a participant's connection state and message events, and retrieving chat transcripts.
+/// Participant Service actions Participant Service data types Amazon Connect is an easy-to-use omnichannel cloud contact center service that enables companies of any size to deliver superior customer service at a lower cost. Amazon Connect communications capabilities make it easy for companies to deliver personalized interactions across communication channels, including chat. Use the Amazon Connect Participant Service to manage participants (for example, agents, customers, and managers listening in), and to send messages and events within a chat contact. The APIs in the service enable the following: sending chat messages, attachment sharing, managing a participant's connection state and message events, and retrieving chat transcripts. public struct ConnectParticipant: AWSService { // MARK: Member variables @@ -89,7 +89,39 @@ public struct ConnectParticipant: AWSService { // MARK: API Calls - /// Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Cancels the authentication session. The opted out branch of the Authenticate Customer flow block will be taken. The current supported channel is chat. This API is not supported for Apple Messages for Business, WhatsApp, or SMS chats. + @Sendable + @inlinable + public func cancelParticipantAuthentication(_ input: CancelParticipantAuthenticationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CancelParticipantAuthenticationResponse { + try await self.client.execute( + operation: "CancelParticipantAuthentication", + path: "/participant/cancel-authentication", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Cancels the authentication session. The opted out branch of the Authenticate Customer flow block will be taken. The current supported channel is chat. This API is not supported for Apple Messages for Business, WhatsApp, or SMS chats. + /// + /// Parameters: + /// - connectionToken: The authentication token associated with the participant's connection. + /// - sessionId: The sessionId provided in the authenticationInitiated event. + /// - logger: Logger use during operation + @inlinable + public func cancelParticipantAuthentication( + connectionToken: String, + sessionId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CancelParticipantAuthenticationResponse { + let input = CancelParticipantAuthenticationRequest( + connectionToken: connectionToken, + sessionId: sessionId + ) + return try await self.cancelParticipantAuthentication(input, logger: logger) + } + + /// Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. 
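A minimal usage sketch for the CancelParticipantAuthentication operation added above (not part of the diff; the connection token and session ID come from the chat session and its authenticationInitiated event):

import SotoConnectParticipant

// Sketch: opt the customer out of authentication, taking the opted-out
// branch of the Authenticate Customer flow block.
func declineAuthentication(participant: ConnectParticipant, connectionToken: String, sessionId: String) async throws {
    _ = try await participant.cancelParticipantAuthentication(
        connectionToken: connectionToken,
        sessionId: sessionId   // exactly 36 characters, per the validation in the shapes file below
    )
}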
@Sendable @inlinable public func completeAttachmentUpload(_ input: CompleteAttachmentUploadRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CompleteAttachmentUploadResponse { @@ -102,7 +134,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - attachmentIds: A list of unique identifiers for the attachments. @@ -124,7 +156,7 @@ public struct ConnectParticipant: AWSService { return try await self.completeAttachmentUpload(input, logger: logger) } - /// Creates the participant's connection. ParticipantToken is used for invoking this API instead of ConnectionToken. The participant token is valid for the lifetime of the participant – until they are part of a contact. The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic. For chat, you need to publish the following on the established websocket connection: {"topic":"aws/subscribe","content":{"topics":["aws/chat"]}} Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before. Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect Administrator Guide. Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Creates the participant's connection. For security recommendations, see Amazon Connect Chat security best practices. ParticipantToken is used for invoking this API instead of ConnectionToken. The participant token is valid for the lifetime of the participant – until they are part of a contact. The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic. For chat, you need to publish the following on the established websocket connection: {"topic":"aws/subscribe","content":{"topics":["aws/chat"]}} Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before. 
Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect Administrator Guide. Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func createParticipantConnection(_ input: CreateParticipantConnectionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateParticipantConnectionResponse { @@ -137,7 +169,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Creates the participant's connection. ParticipantToken is used for invoking this API instead of ConnectionToken. The participant token is valid for the lifetime of the participant – until they are part of a contact. The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic. For chat, you need to publish the following on the established websocket connection: {"topic":"aws/subscribe","content":{"topics":["aws/chat"]}} Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before. Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect Administrator Guide. Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Creates the participant's connection. For security recommendations, see Amazon Connect Chat security best practices. ParticipantToken is used for invoking this API instead of ConnectionToken. The participant token is valid for the lifetime of the participant – until they are part of a contact. The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic. For chat, you need to publish the following on the established websocket connection: {"topic":"aws/subscribe","content":{"topics":["aws/chat"]}} Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before. Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect Administrator Guide. 
Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - connectParticipant: Amazon Connect Participant is used to mark the participant as connected for customer participant in message streaming, as well as for agent or manager participant in non-streaming chats. @@ -159,7 +191,7 @@ public struct ConnectParticipant: AWSService { return try await self.createParticipantConnection(input, logger: logger) } - /// Retrieves the view for the specified view token. + /// Retrieves the view for the specified view token. For security recommendations, see Amazon Connect Chat security best practices. @Sendable @inlinable public func describeView(_ input: DescribeViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeViewResponse { @@ -172,7 +204,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Retrieves the view for the specified view token. + /// Retrieves the view for the specified view token. For security recommendations, see Amazon Connect Chat security best practices. /// /// Parameters: /// - connectionToken: The connection token. @@ -191,7 +223,7 @@ public struct ConnectParticipant: AWSService { return try await self.describeView(input, logger: logger) } - /// Disconnects a participant. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Disconnects a participant. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func disconnectParticipant(_ input: DisconnectParticipantRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisconnectParticipantResponse { @@ -204,7 +236,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Disconnects a participant. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Disconnects a participant. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. @@ -223,7 +255,7 @@ public struct ConnectParticipant: AWSService { return try await self.disconnectParticipant(input, logger: logger) } - /// Provides a pre-signed URL for download of a completed attachment. This is an asynchronous API for use with active contacts. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Provides a pre-signed URL for download of a completed attachment. 
This is an asynchronous API for use with active contacts. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func getAttachment(_ input: GetAttachmentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAttachmentResponse { @@ -236,26 +268,64 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Provides a pre-signed URL for download of a completed attachment. This is an asynchronous API for use with active contacts. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Provides a pre-signed URL for download of a completed attachment. This is an asynchronous API for use with active contacts. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - attachmentId: A unique identifier for the attachment. /// - connectionToken: The authentication token associated with the participant's connection. + /// - urlExpiryInSeconds: The expiration duration of the pre-signed URL, in seconds. Valid values: 5 to 300. /// - logger: Logger use during operation @inlinable public func getAttachment( attachmentId: String, connectionToken: String, + urlExpiryInSeconds: Int? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> GetAttachmentResponse { let input = GetAttachmentRequest( attachmentId: attachmentId, - connectionToken: connectionToken + connectionToken: connectionToken, + urlExpiryInSeconds: urlExpiryInSeconds ) return try await self.getAttachment(input, logger: logger) } - /// Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat. If you have a process that consumes events in the transcript of an chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session: application/vnd.amazonaws.connect.event.participant.left application/vnd.amazonaws.connect.event.participant.joined application/vnd.amazonaws.connect.event.chat.ended application/vnd.amazonaws.connect.event.transfer.succeeded application/vnd.amazonaws.connect.event.transfer.failed ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Retrieves the AuthenticationUrl for the current authentication session for the AuthenticateCustomer flow block. For security recommendations, see Amazon Connect Chat security best practices. This API can only be called within one minute of receiving the authenticationInitiated event. The current supported channel is chat. This API is not supported for Apple Messages for Business, WhatsApp, or SMS chats.
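A hedged sketch of calling GetAuthenticationUrl within its one-minute window (not from the diff; the redirect URI is a placeholder, and the authenticationUrl property name is assumed from the response shape):

import SotoConnectParticipant

// Sketch: fetch the Cognito hosted-login URL for the current authentication session.
func fetchAuthenticationUrl(participant: ConnectParticipant, connectionToken: String, sessionId: String) async throws -> String? {
    let response = try await participant.getAuthenticationUrl(
        connectionToken: connectionToken,
        redirectUri: "https://example.com/connect/callback",   // placeholder; must be a redirect URI allowed by the Cognito app client
        sessionId: sessionId
    )
    return response.authenticationUrl   // assumed property name on GetAuthenticationUrlResponse
}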
+ @Sendable + @inlinable + public func getAuthenticationUrl(_ input: GetAuthenticationUrlRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAuthenticationUrlResponse { + try await self.client.execute( + operation: "GetAuthenticationUrl", + path: "/participant/authentication-url", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves the AuthenticationUrl for the current authentication session for the AuthenticateCustomer flow block. For security recommendations, see Amazon Connect Chat security best practices. This API can only be called within one minute of receiving the authenticationInitiated event. The current supported channel is chat. This API is not supported for Apple Messages for Business, WhatsApp, or SMS chats. + /// + /// Parameters: + /// - connectionToken: The authentication token associated with the participant's connection. + /// - redirectUri: The URL where the customer will be redirected after Amazon Cognito authorizes the user. + /// - sessionId: The sessionId provided in the authenticationInitiated event. + /// - logger: Logger use during operation + @inlinable + public func getAuthenticationUrl( + connectionToken: String, + redirectUri: String, + sessionId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetAuthenticationUrlResponse { + let input = GetAuthenticationUrlRequest( + connectionToken: connectionToken, + redirectUri: redirectUri, + sessionId: sessionId + ) + return try await self.getAuthenticationUrl(input, logger: logger) + } + + /// Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat. For security recommendations, see Amazon Connect Chat security best practices. If you have a process that consumes events in the transcript of a chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session: application/vnd.amazonaws.connect.event.participant.left application/vnd.amazonaws.connect.event.participant.joined application/vnd.amazonaws.connect.event.chat.ended application/vnd.amazonaws.connect.event.transfer.succeeded application/vnd.amazonaws.connect.event.transfer.failed ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func getTranscript(_ input: GetTranscriptRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTranscriptResponse { @@ -268,7 +338,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat. If you have a process that consumes events in the transcript of an chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session: application/vnd.amazonaws.connect.event.participant.left application/vnd.amazonaws.connect.event.participant.joined application/vnd.amazonaws.connect.event.chat.ended application/vnd.amazonaws.connect.event.transfer.succeeded application/vnd.amazonaws.connect.event.transfer.failed ConnectionToken is used for invoking this API instead of ParticipantToken.
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat. For security recommendations, see Amazon Connect Chat security best practices. If you have a process that consumes events in the transcript of a chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session: application/vnd.amazonaws.connect.event.participant.left application/vnd.amazonaws.connect.event.participant.joined application/vnd.amazonaws.connect.event.chat.ended application/vnd.amazonaws.connect.event.transfer.succeeded application/vnd.amazonaws.connect.event.transfer.failed ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - connectionToken: The authentication token associated with the participant's connection. @@ -302,7 +372,7 @@ public struct ConnectParticipant: AWSService { return try await self.getTranscript(input, logger: logger) } - /// The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant field. Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor is barged-in will result in a conflict exception. ConnectionToken is used for invoking this API instead of ParticipantToken.
+ /// The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant field. Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor is barged-in will result in a conflict exception. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. @@ -340,7 +410,7 @@ public struct ConnectParticipant: AWSService { return try await self.sendEvent(input, logger: logger) } - /// Sends a message. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Sends a message. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func sendMessage(_ input: SendMessageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SendMessageResponse { @@ -353,7 +423,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Sends a message. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Sends a message. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. @@ -378,7 +448,7 @@ public struct ConnectParticipant: AWSService { return try await self.sendMessage(input, logger: logger) } - /// Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. 
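Circling back to the GetAttachment change earlier in this file, a brief sketch of requesting a short-lived download URL via the new urlExpiryInSeconds parameter (not part of the diff):

import SotoConnectParticipant

// Sketch: ask for a pre-signed download URL that expires after 60 seconds;
// the shapes file below validates the value to the range 5...300.
func shortLivedDownloadUrl(participant: ConnectParticipant, connectionToken: String, attachmentId: String) async throws {
    let response = try await participant.getAttachment(
        attachmentId: attachmentId,
        connectionToken: connectionToken,
        urlExpiryInSeconds: 60
    )
    print("url:", response.url ?? "n/a")
    print("expires:", response.urlExpiry ?? "n/a")
    print("bytes:", response.attachmentSizeInBytes)   // new non-optional field on GetAttachmentResponse
}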
@Sendable @inlinable public func startAttachmentUpload(_ input: StartAttachmentUploadRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartAttachmentUploadResponse { @@ -391,7 +461,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - attachmentName: A case-sensitive name of the attachment being uploaded. diff --git a/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_shapes.swift b/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_shapes.swift index bc686afc12..a2fe0f9cca 100644 --- a/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_shapes.swift +++ b/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_shapes.swift @@ -104,6 +104,41 @@ extension ConnectParticipant { } } + public struct CancelParticipantAuthenticationRequest: AWSEncodableShape { + /// The authentication token associated with the participant's connection. + public let connectionToken: String + /// The sessionId provided in the authenticationInitiated event. + public let sessionId: String + + @inlinable + public init(connectionToken: String, sessionId: String) { + self.connectionToken = connectionToken + self.sessionId = sessionId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.connectionToken, key: "X-Amz-Bearer") + try container.encode(self.sessionId, forKey: .sessionId) + } + + public func validate(name: String) throws { + try self.validate(self.connectionToken, name: "connectionToken", parent: name, max: 1000) + try self.validate(self.connectionToken, name: "connectionToken", parent: name, min: 1) + try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36) + try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36) + } + + private enum CodingKeys: String, CodingKey { + case sessionId = "SessionId" + } + } + + public struct CancelParticipantAuthenticationResponse: AWSDecodableShape { + public init() {} + } + public struct CompleteAttachmentUploadRequest: AWSEncodableShape { /// A list of unique identifiers for the attachments. public let attachmentIds: [String] @@ -303,11 +338,14 @@ extension ConnectParticipant { public let attachmentId: String /// The authentication token associated with the participant's connection. public let connectionToken: String + /// The expiration duration of the pre-signed URL, in seconds. Valid range: 5 to 300 seconds. + public let urlExpiryInSeconds: Int? @inlinable - public init(attachmentId: String, connectionToken: String) { + public init(attachmentId: String, connectionToken: String, urlExpiryInSeconds: Int?
= nil) { self.attachmentId = attachmentId self.connectionToken = connectionToken + self.urlExpiryInSeconds = urlExpiryInSeconds } public func encode(to encoder: Encoder) throws { @@ -315,6 +353,7 @@ extension ConnectParticipant { var container = encoder.container(keyedBy: CodingKeys.self) try container.encode(self.attachmentId, forKey: .attachmentId) request.encodeHeader(self.connectionToken, key: "X-Amz-Bearer") + try container.encodeIfPresent(self.urlExpiryInSeconds, forKey: .urlExpiryInSeconds) } public func validate(name: String) throws { @@ -322,14 +361,19 @@ extension ConnectParticipant { try self.validate(self.attachmentId, name: "attachmentId", parent: name, min: 1) try self.validate(self.connectionToken, name: "connectionToken", parent: name, max: 1000) try self.validate(self.connectionToken, name: "connectionToken", parent: name, min: 1) + try self.validate(self.urlExpiryInSeconds, name: "urlExpiryInSeconds", parent: name, max: 300) + try self.validate(self.urlExpiryInSeconds, name: "urlExpiryInSeconds", parent: name, min: 5) } private enum CodingKeys: String, CodingKey { case attachmentId = "AttachmentId" + case urlExpiryInSeconds = "UrlExpiryInSeconds" } } public struct GetAttachmentResponse: AWSDecodableShape { + /// The size of the attachment in bytes. + public let attachmentSizeInBytes: Int64 /// This is the pre-signed URL that can be used for uploading the file to Amazon S3 when used in response /// to StartAttachmentUpload. public let url: String? @@ -337,17 +381,71 @@ extension ConnectParticipant { public let urlExpiry: String? @inlinable - public init(url: String? = nil, urlExpiry: String? = nil) { + public init(attachmentSizeInBytes: Int64, url: String? = nil, urlExpiry: String? = nil) { + self.attachmentSizeInBytes = attachmentSizeInBytes self.url = url self.urlExpiry = urlExpiry } private enum CodingKeys: String, CodingKey { + case attachmentSizeInBytes = "AttachmentSizeInBytes" case url = "Url" case urlExpiry = "UrlExpiry" } } + public struct GetAuthenticationUrlRequest: AWSEncodableShape { + /// The authentication token associated with the participant's connection. + public let connectionToken: String + /// The URL where the customer will be redirected after Amazon Cognito authorizes the user. + public let redirectUri: String + /// The sessionId provided in the authenticationInitiated event. + public let sessionId: String + + @inlinable + public init(connectionToken: String, redirectUri: String, sessionId: String) { + self.connectionToken = connectionToken + self.redirectUri = redirectUri + self.sessionId = sessionId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.connectionToken, key: "X-Amz-Bearer") + try container.encode(self.redirectUri, forKey: .redirectUri) + try container.encode(self.sessionId, forKey: .sessionId) + } + + public func validate(name: String) throws { + try self.validate(self.connectionToken, name: "connectionToken", parent: name, max: 1000) + try self.validate(self.connectionToken, name: "connectionToken", parent: name, min: 1) + try self.validate(self.redirectUri, name: "redirectUri", parent: name, max: 1024) + try self.validate(self.redirectUri, name: "redirectUri", parent: name, min: 1) + try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36) + try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36) + } + + private enum CodingKeys: String, CodingKey { + case redirectUri = "RedirectUri" + case sessionId = "SessionId" + } + } + + public struct GetAuthenticationUrlResponse: AWSDecodableShape { + /// The URL where the customer will sign in to the identity provider. This URL contains the authorize endpoint for the Cognito UserPool used in the authentication. + public let authenticationUrl: String? + + @inlinable + public init(authenticationUrl: String? = nil) { + self.authenticationUrl = authenticationUrl + } + + private enum CodingKeys: String, CodingKey { + case authenticationUrl = "AuthenticationUrl" + } + } + public struct GetTranscriptRequest: AWSEncodableShape { /// The authentication token associated with the participant's connection. public let connectionToken: String @@ -707,7 +805,7 @@ extension ConnectParticipant { public struct StartAttachmentUploadResponse: AWSDecodableShape { /// A unique identifier for the attachment. public let attachmentId: String? - /// Fields to be used while uploading the attachment. + /// The headers to be provided while uploading the file to the URL. public let uploadMetadata: UploadMetadata? @inlinable diff --git a/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift b/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift index a6b144d255..9866a24905 100644 --- a/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift +++ b/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift @@ -517,6 +517,7 @@ public struct CostExplorer: AWSService { /// Retrieves cost and usage metrics for your account. You can specify which cost and usage-related metric that you want the request to return. For example, you can specify BlendedCosts or UsageQuantity. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Management account in an organization in Organizations have access to all member accounts. For information about filter limitations, see Quotas and restrictions in the Billing and Cost Management User Guide. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - filter: Filters Amazon Web Services costs by different dimensions. 
For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression. Valid values for MatchOptions for Dimensions are EQUALS and CASE_SENSITIVE. Valid values for MatchOptions for CostCategories and Tags are EQUALS, ABSENT, and CASE_SENSITIVE. Default values are EQUALS and CASE_SENSITIVE. /// - granularity: Sets the Amazon Web Services cost granularity to MONTHLY or DAILY, or HOURLY. If Granularity isn't set, the response object doesn't include the Granularity, either MONTHLY or DAILY, or HOURLY. /// - groupBy: You can group Amazon Web Services costs using up to two different groups, either dimensions, tag keys, cost categories, or any two group by types. Valid values for the DIMENSION type are AZ, INSTANCE_TYPE, LEGAL_ENTITY_NAME, INVOICING_ENTITY, LINKED_ACCOUNT, OPERATION, PLATFORM, PURCHASE_TYPE, SERVICE, TENANCY, RECORD_TYPE, and USAGE_TYPE. When you group by the TAG type and include a valid tag key, you get all tag values, including empty strings. @@ -526,6 +527,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getCostAndUsage( + billingViewArn: String? = nil, filter: Expression? = nil, granularity: Granularity, groupBy: [GroupDefinition]? = nil, @@ -535,6 +537,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetCostAndUsageResponse { let input = GetCostAndUsageRequest( + billingViewArn: billingViewArn, filter: filter, granularity: granularity, groupBy: groupBy, @@ -561,6 +564,7 @@ public struct CostExplorer: AWSService { /// Retrieves cost and usage metrics with resources for your account. You can specify which cost and usage-related metric, such as BlendedCosts or UsageQuantity, that you want the request to return. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Management account in an organization in Organizations have access to all member accounts. Hourly granularity is only available for EC2-Instances (Elastic Compute Cloud) resource-level data. All other resource-level data is available at daily granularity. This is an opt-in only feature. You can enable this feature from the Cost Explorer Settings page. For information about how to access the Settings page, see Controlling Access for Cost Explorer in the Billing and Cost Management User Guide. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - filter: Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression. The GetCostAndUsageWithResources operation requires that you either group by or filter by a ResourceId. 
It requires the Expression "SERVICE = Amazon Elastic Compute Cloud - Compute" in the filter. Valid values for MatchOptions for Dimensions are EQUALS and CASE_SENSITIVE. Valid values for MatchOptions for CostCategories and Tags are EQUALS, ABSENT, and CASE_SENSITIVE. Default values are EQUALS and CASE_SENSITIVE. /// - granularity: Sets the Amazon Web Services cost granularity to MONTHLY, DAILY, or HOURLY. If Granularity isn't set, the response object doesn't include the Granularity, MONTHLY, DAILY, or HOURLY. /// - groupBy: You can group Amazon Web Services costs using up to two different groups: DIMENSION, TAG, COST_CATEGORY. @@ -570,6 +574,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getCostAndUsageWithResources( + billingViewArn: String? = nil, filter: Expression, granularity: Granularity, groupBy: [GroupDefinition]? = nil, @@ -579,6 +584,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetCostAndUsageWithResourcesResponse { let input = GetCostAndUsageWithResourcesRequest( + billingViewArn: billingViewArn, filter: filter, granularity: granularity, groupBy: groupBy, @@ -605,6 +611,7 @@ public struct CostExplorer: AWSService { /// Retrieves an array of Cost Category names and values incurred cost. If some Cost Category names and values are not associated with any cost, they will not be returned by this API. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - costCategoryName: /// - filter: /// - maxResults: This field is only used when the SortBy value is provided in the request. The maximum number of objects that are returned for this request. If MaxResults isn't specified with the SortBy value, the request returns 1000 results as the default value for this parameter. For GetCostCategories, MaxResults has an upper quota of 1000. @@ -615,6 +622,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getCostCategories( + billingViewArn: String? = nil, costCategoryName: String? = nil, filter: Expression? = nil, maxResults: Int? = nil, @@ -625,6 +633,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetCostCategoriesResponse { let input = GetCostCategoriesRequest( + billingViewArn: billingViewArn, costCategoryName: costCategoryName, filter: filter, maxResults: maxResults, @@ -652,6 +661,7 @@ public struct CostExplorer: AWSService { /// Retrieves a forecast for how much Amazon Web Services predicts that you will spend over the forecast time period that you select, based on your past costs. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - filter: The filters that you want to use to filter your forecast. 
The GetCostForecast API supports filtering by the following dimensions: AZ INSTANCE_TYPE LINKED_ACCOUNT LINKED_ACCOUNT_NAME OPERATION PURCHASE_TYPE REGION SERVICE USAGE_TYPE USAGE_TYPE_GROUP RECORD_TYPE OPERATING_SYSTEM TENANCY SCOPE PLATFORM SUBSCRIPTION_ID LEGAL_ENTITY_NAME DEPLOYMENT_OPTION DATABASE_ENGINE INSTANCE_TYPE_FAMILY BILLING_ENTITY RESERVATION_ID SAVINGS_PLAN_ARN /// - granularity: How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 12 months of MONTHLY forecasts. The GetCostForecast operation supports only DAILY and MONTHLY granularities. /// - metric: Which metric Cost Explorer uses to create your forecast. For more information about blended and unblended rates, see Why does the "blended" annotation appear on some line items in my bill?. Valid values for a GetCostForecast call are the following: AMORTIZED_COST BLENDED_COST NET_AMORTIZED_COST NET_UNBLENDED_COST UNBLENDED_COST @@ -660,6 +670,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getCostForecast( + billingViewArn: String? = nil, filter: Expression? = nil, granularity: Granularity, metric: Metric, @@ -668,6 +679,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetCostForecastResponse { let input = GetCostForecastRequest( + billingViewArn: billingViewArn, filter: filter, granularity: granularity, metric: metric, @@ -693,6 +705,7 @@ public struct CostExplorer: AWSService { /// Retrieves all available filter values for a specified filter over a period of time. You can search the dimension values for an arbitrary string. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - context: The context for the call to GetDimensionValues. This can be RESERVATIONS or COST_AND_USAGE. The default value is COST_AND_USAGE. If the context is set to RESERVATIONS, the resulting dimension values can be used in the GetReservationUtilization operation. If the context is set to COST_AND_USAGE, the resulting dimension values can be used in the GetCostAndUsage operation. If you set the context to COST_AND_USAGE, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. BILLING_ENTITY - The Amazon Web Services seller that your account is with. Possible values are the following: - Amazon Web Services(Amazon Web Services): The entity that sells Amazon Web Services services. - AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that's an acting reseller for Amazon Web Services services in India. - Amazon Web Services Marketplace: The entity that supports the sale of solutions that are built on Amazon Web Services by third-party software providers. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. 
INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use cases. Examples are Compute Optimized (for example, C4, C5, C6g, and C7g), Memory Optimization (for example, R4, R5n, R5b, and R6g). INVOICING_ENTITY - The name of the entity that issues the Amazon Web Services invoice. LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. OPERATING_SYSTEM - The operating system. Examples are Windows or Linux. OPERATION - The action performed. Examples include RunInstance and CreateBucket. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. PURCHASE_TYPE - The reservation type of the purchase that this usage is related to. Examples include On-Demand Instances and Standard Reserved Instances. RESERVATION_ID - The unique identifier for an Amazon Web Services Reservation Instance. SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute). SERVICE - The Amazon Web Services service such as Amazon DynamoDB. TENANCY - The tenancy of a resource. Examples are shared or dedicated. USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues operation includes a unit attribute. Examples include GB and Hrs. USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this operation includes a unit attribute. REGION - The Amazon Web Services Region. RECORD_TYPE - The different types of charges such as Reserved Instance (RI) fees, usage costs, tax refunds, and credits. RESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service. If you set the context to RESERVATIONS, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. REGION - The Amazon Web Services Region. SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone. TAG (Coverage only) - The tags that are associated with a Reserved Instance (RI). TENANCY - The tenancy of a resource. Examples are shared or dedicated. If you set the context to SAVINGS_PLANS, you can use the following dimensions for searching: SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute) PAYMENT_OPTION - The payment option for the given Savings Plans (for example, All Upfront) REGION - The Amazon Web Services Region. INSTANCE_TYPE_FAMILY - The family of instances (For example, m5) LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. 
SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. /// - dimension: The name of the dimension. Each Dimension is available for a different Context. For more information, see Context. LINK_ACCOUNT_NAME and SERVICE_CODE can only be used in CostCategoryRule. /// - filter: @@ -704,6 +717,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getDimensionValues( + billingViewArn: String? = nil, context: Context? = nil, dimension: Dimension, filter: Expression? = nil, @@ -715,6 +729,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetDimensionValuesResponse { let input = GetDimensionValuesRequest( + billingViewArn: billingViewArn, context: context, dimension: dimension, filter: filter, @@ -1148,6 +1163,7 @@ public struct CostExplorer: AWSService { /// Queries for available tag keys and tag values for a specified period. You can search the tag values for an arbitrary string. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - filter: /// - maxResults: This field is only used when SortBy is provided in the request. The maximum number of objects that are returned for this request. If MaxResults isn't specified with SortBy, the request returns 1000 results as the default value for this parameter. For GetTags, MaxResults has an upper quota of 1000. /// - nextPageToken: The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size. @@ -1158,6 +1174,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getTags( + billingViewArn: String? = nil, filter: Expression? = nil, maxResults: Int? = nil, nextPageToken: String? = nil, @@ -1168,6 +1185,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetTagsResponse { let input = GetTagsRequest( + billingViewArn: billingViewArn, filter: filter, maxResults: maxResults, nextPageToken: nextPageToken, @@ -1195,6 +1213,7 @@ public struct CostExplorer: AWSService { /// Retrieves a forecast for how much Amazon Web Services predicts that you will use over the forecast time period that you select, based on your past usage. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - filter: The filters that you want to use to filter your forecast. 
The GetUsageForecast API supports filtering by the following dimensions: AZ INSTANCE_TYPE LINKED_ACCOUNT LINKED_ACCOUNT_NAME OPERATION PURCHASE_TYPE REGION SERVICE USAGE_TYPE USAGE_TYPE_GROUP RECORD_TYPE OPERATING_SYSTEM TENANCY SCOPE PLATFORM SUBSCRIPTION_ID LEGAL_ENTITY_NAME DEPLOYMENT_OPTION DATABASE_ENGINE INSTANCE_TYPE_FAMILY BILLING_ENTITY RESERVATION_ID SAVINGS_PLAN_ARN /// - granularity: How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 12 months of MONTHLY forecasts. The GetUsageForecast operation supports only DAILY and MONTHLY granularities. /// - metric: Which metric Cost Explorer uses to create your forecast. Valid values for a GetUsageForecast call are the following: USAGE_QUANTITY NORMALIZED_USAGE_AMOUNT @@ -1203,6 +1222,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getUsageForecast( + billingViewArn: String? = nil, filter: Expression? = nil, granularity: Granularity, metric: Metric, @@ -1211,6 +1231,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetUsageForecastResponse { let input = GetUsageForecastRequest( + billingViewArn: billingViewArn, filter: filter, granularity: granularity, metric: metric, diff --git a/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift b/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift index 7c90d7f69b..d208c440c0 100644 --- a/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift +++ b/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift @@ -2152,6 +2152,8 @@ extension CostExplorer { } public struct GetCostAndUsageRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. + public let billingViewArn: String? /// Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression. Valid values for MatchOptions for Dimensions are EQUALS and CASE_SENSITIVE. Valid values for MatchOptions for CostCategories and Tags are EQUALS, ABSENT, and CASE_SENSITIVE. Default values are EQUALS and CASE_SENSITIVE. public let filter: Expression? /// Sets the Amazon Web Services cost granularity to MONTHLY or DAILY, or HOURLY. If Granularity isn't set, the response object doesn't include the Granularity, either MONTHLY or DAILY, or HOURLY. @@ -2166,7 +2168,8 @@ extension CostExplorer { public let timePeriod: DateInterval @inlinable - public init(filter: Expression? = nil, granularity: Granularity, groupBy: [GroupDefinition]? = nil, metrics: [String], nextPageToken: String? = nil, timePeriod: DateInterval) { + public init(billingViewArn: String? = nil, filter: Expression? = nil, granularity: Granularity, groupBy: [GroupDefinition]? = nil, metrics: [String], nextPageToken: String? 
= nil, timePeriod: DateInterval) { + self.billingViewArn = billingViewArn self.filter = filter self.granularity = granularity self.groupBy = groupBy @@ -2176,6 +2179,9 @@ extension CostExplorer { } public func validate(name: String) throws { + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, max: 2048) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, min: 20) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[-a-zA-Z0-9/:_+=.-@]{1,43}$") try self.filter?.validate(name: "\(name).filter") try self.groupBy?.forEach { try $0.validate(name: "\(name).groupBy[]") @@ -2190,6 +2196,7 @@ extension CostExplorer { } private enum CodingKeys: String, CodingKey { + case billingViewArn = "BillingViewArn" case filter = "Filter" case granularity = "Granularity" case groupBy = "GroupBy" @@ -2226,6 +2233,8 @@ extension CostExplorer { } public struct GetCostAndUsageWithResourcesRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. + public let billingViewArn: String? /// Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression. The GetCostAndUsageWithResources operation requires that you either group by or filter by a ResourceId. It requires the Expression "SERVICE = Amazon Elastic Compute Cloud - Compute" in the filter. Valid values for MatchOptions for Dimensions are EQUALS and CASE_SENSITIVE. Valid values for MatchOptions for CostCategories and Tags are EQUALS, ABSENT, and CASE_SENSITIVE. Default values are EQUALS and CASE_SENSITIVE. public let filter: Expression /// Sets the Amazon Web Services cost granularity to MONTHLY, DAILY, or HOURLY. If Granularity isn't set, the response object doesn't include the Granularity, MONTHLY, DAILY, or HOURLY. @@ -2240,7 +2249,8 @@ extension CostExplorer { public let timePeriod: DateInterval @inlinable - public init(filter: Expression, granularity: Granularity, groupBy: [GroupDefinition]? = nil, metrics: [String]? = nil, nextPageToken: String? = nil, timePeriod: DateInterval) { + public init(billingViewArn: String? = nil, filter: Expression, granularity: Granularity, groupBy: [GroupDefinition]? = nil, metrics: [String]? = nil, nextPageToken: String? 
= nil, timePeriod: DateInterval) { + self.billingViewArn = billingViewArn self.filter = filter self.granularity = granularity self.groupBy = groupBy @@ -2250,6 +2260,9 @@ extension CostExplorer { } public func validate(name: String) throws { + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, max: 2048) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, min: 20) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[-a-zA-Z0-9/:_+=.-@]{1,43}$") try self.filter.validate(name: "\(name).filter") try self.groupBy?.forEach { try $0.validate(name: "\(name).groupBy[]") @@ -2264,6 +2277,7 @@ extension CostExplorer { } private enum CodingKeys: String, CodingKey { + case billingViewArn = "BillingViewArn" case filter = "Filter" case granularity = "Granularity" case groupBy = "GroupBy" @@ -2300,6 +2314,8 @@ extension CostExplorer { } public struct GetCostCategoriesRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. + public let billingViewArn: String? public let costCategoryName: String? public let filter: Expression? /// This field is only used when the SortBy value is provided in the request. The maximum number of objects that are returned for this request. If MaxResults isn't specified with the SortBy value, the request returns 1000 results as the default value for this parameter. For GetCostCategories, MaxResults has an upper quota of 1000. @@ -2313,7 +2329,8 @@ extension CostExplorer { public let timePeriod: DateInterval @inlinable - public init(costCategoryName: String? = nil, filter: Expression? = nil, maxResults: Int? = nil, nextPageToken: String? = nil, searchString: String? = nil, sortBy: [SortDefinition]? = nil, timePeriod: DateInterval) { + public init(billingViewArn: String? = nil, costCategoryName: String? = nil, filter: Expression? = nil, maxResults: Int? = nil, nextPageToken: String? = nil, searchString: String? = nil, sortBy: [SortDefinition]? = nil, timePeriod: DateInterval) { + self.billingViewArn = billingViewArn self.costCategoryName = costCategoryName self.filter = filter self.maxResults = maxResults @@ -2324,6 +2341,9 @@ extension CostExplorer { } public func validate(name: String) throws { + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, max: 2048) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, min: 20) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[-a-zA-Z0-9/:_+=.-@]{1,43}$") try self.validate(self.costCategoryName, name: "costCategoryName", parent: name, max: 50) try self.validate(self.costCategoryName, name: "costCategoryName", parent: name, min: 1) try self.validate(self.costCategoryName, name: "costCategoryName", parent: name, pattern: "^(?! )[\\p{L}\\p{N}\\p{Z}-_]*(? CreateLocationNfsResponse { @@ -562,7 +610,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Creates a transfer location for a Network File System (NFS) file server. DataSync can use this location as a source or destination for transferring data. 
Before you begin, make sure that you understand how DataSync accesses NFS file servers. If you're copying data to or from an Snowcone device, you can also use CreateLocationNfs to create your transfer location. For more information, see Configuring transfers with Snowcone. + /// Creates a transfer location for a Network File System (NFS) file server. DataSync can use this location as a source or destination for transferring data. Before you begin, make sure that you understand how DataSync accesses NFS file servers. /// /// Parameters: /// - mountOptions: Specifies the options that DataSync can use to mount your NFS file server. @@ -2000,7 +2048,7 @@ public struct DataSync: AWSService { return try await self.updateDiscoveryJob(input, logger: logger) } - /// Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync. + /// Modifies the following configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with Azure Blob Storage. @Sendable @inlinable public func updateLocationAzureBlob(_ input: UpdateLocationAzureBlobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationAzureBlobResponse { @@ -2013,7 +2061,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync. + /// Modifies the following configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with Azure Blob Storage. /// /// Parameters: /// - accessTier: Specifies the access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers. @@ -2047,7 +2095,191 @@ public struct DataSync: AWSService { return try await self.updateLocationAzureBlob(input, logger: logger) } - /// Updates some parameters of a previously created location for a Hadoop Distributed File System cluster. + /// Modifies the following configuration parameters of the Amazon EFS transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with Amazon EFS. + @Sendable + @inlinable + public func updateLocationEfs(_ input: UpdateLocationEfsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationEfsResponse { + try await self.client.execute( + operation: "UpdateLocationEfs", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon EFS transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with Amazon EFS. + /// + /// Parameters: + /// - accessPointArn: Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system. For more information, see Accessing restricted Amazon EFS file systems. + /// - fileSystemAccessRoleArn: Specifies an Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system. For information on creating this role, see Creating a DataSync IAM role for Amazon EFS file system access. 
+ /// - inTransitEncryption: Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system. If you specify an access point using AccessPointArn or an IAM role using FileSystemAccessRoleArn, you must set this parameter to TLS1_2. + /// - locationArn: Specifies the Amazon Resource Name (ARN) of the Amazon EFS transfer location that you're updating. + /// - subdirectory: Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location). By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder). + /// - logger: Logger use during operation + @inlinable + public func updateLocationEfs( + accessPointArn: String? = nil, + fileSystemAccessRoleArn: String? = nil, + inTransitEncryption: EfsInTransitEncryption? = nil, + locationArn: String, + subdirectory: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationEfsResponse { + let input = UpdateLocationEfsRequest( + accessPointArn: accessPointArn, + fileSystemAccessRoleArn: fileSystemAccessRoleArn, + inTransitEncryption: inTransitEncryption, + locationArn: locationArn, + subdirectory: subdirectory + ) + return try await self.updateLocationEfs(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Amazon FSx for Lustre transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for Lustre. + @Sendable + @inlinable + public func updateLocationFsxLustre(_ input: UpdateLocationFsxLustreRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationFsxLustreResponse { + try await self.client.execute( + operation: "UpdateLocationFsxLustre", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon FSx for Lustre transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for Lustre. + /// + /// Parameters: + /// - locationArn: Specifies the Amazon Resource Name (ARN) of the FSx for Lustre transfer location that you're updating. + /// - subdirectory: Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories. When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory (/). + /// - logger: Logger use during operation + @inlinable + public func updateLocationFsxLustre( + locationArn: String, + subdirectory: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationFsxLustreResponse { + let input = UpdateLocationFsxLustreRequest( + locationArn: locationArn, + subdirectory: subdirectory + ) + return try await self.updateLocationFsxLustre(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Amazon FSx for NetApp ONTAP transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for ONTAP. 
+ @Sendable + @inlinable + public func updateLocationFsxOntap(_ input: UpdateLocationFsxOntapRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationFsxOntapResponse { + try await self.client.execute( + operation: "UpdateLocationFsxOntap", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon FSx for NetApp ONTAP transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for ONTAP. + /// + /// Parameters: + /// - locationArn: Specifies the Amazon Resource Name (ARN) of the FSx for ONTAP transfer location that you're updating. + /// - protocol: Specifies the data transfer protocol that DataSync uses to access your Amazon FSx file system. + /// - subdirectory: Specifies a path to the file share in the storage virtual machine (SVM) where you want to transfer data to or from. You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be /vol1, /vol1/tree1, or /share1. Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide. + /// - logger: Logger use during operation + @inlinable + public func updateLocationFsxOntap( + locationArn: String, + protocol: FsxUpdateProtocol? = nil, + subdirectory: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationFsxOntapResponse { + let input = UpdateLocationFsxOntapRequest( + locationArn: locationArn, + protocol: `protocol`, + subdirectory: subdirectory + ) + return try await self.updateLocationFsxOntap(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Amazon FSx for OpenZFS transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for OpenZFS. Request parameters related to SMB aren't supported with the UpdateLocationFsxOpenZfs operation. + @Sendable + @inlinable + public func updateLocationFsxOpenZfs(_ input: UpdateLocationFsxOpenZfsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationFsxOpenZfsResponse { + try await self.client.execute( + operation: "UpdateLocationFsxOpenZfs", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon FSx for OpenZFS transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for OpenZFS. Request parameters related to SMB aren't supported with the UpdateLocationFsxOpenZfs operation. + /// + /// Parameters: + /// - locationArn: Specifies the Amazon Resource Name (ARN) of the FSx for OpenZFS transfer location that you're updating. + /// - protocol: + /// - subdirectory: Specifies a subdirectory in the location's path that must begin with /fsx. DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location). + /// - logger: Logger use during operation + @inlinable + public func updateLocationFsxOpenZfs( + locationArn: String, + protocol: FsxProtocol? = nil, + subdirectory: String? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationFsxOpenZfsResponse { + let input = UpdateLocationFsxOpenZfsRequest( + locationArn: locationArn, + protocol: `protocol`, + subdirectory: subdirectory + ) + return try await self.updateLocationFsxOpenZfs(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Amazon FSx for Windows File Server transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for Windows File Server. + @Sendable + @inlinable + public func updateLocationFsxWindows(_ input: UpdateLocationFsxWindowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationFsxWindowsResponse { + try await self.client.execute( + operation: "UpdateLocationFsxWindows", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon FSx for Windows File Server transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for Windows File Server. + /// + /// Parameters: + /// - domain: Specifies the name of the Windows domain that your FSx for Windows File Server file system belongs to. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system. + /// - locationArn: Specifies the ARN of the FSx for Windows File Server transfer location that you're updating. + /// - password: Specifies the password of the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system. + /// - subdirectory: Specifies a mount path for your file system using forward slashes. DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location). + /// - user: Specifies the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system. For information about choosing a user with the right level of access for your transfer, see required permissions for FSx for Windows File Server locations. + /// - logger: Logger use during operation + @inlinable + public func updateLocationFsxWindows( + domain: String? = nil, + locationArn: String, + password: String? = nil, + subdirectory: String? = nil, + user: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationFsxWindowsResponse { + let input = UpdateLocationFsxWindowsRequest( + domain: domain, + locationArn: locationArn, + password: password, + subdirectory: subdirectory, + user: user + ) + return try await self.updateLocationFsxWindows(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Hadoop Distributed File System (HDFS) transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an HDFS cluster. @Sendable @inlinable public func updateLocationHdfs(_ input: UpdateLocationHdfsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationHdfsResponse { @@ -2060,7 +2292,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Updates some parameters of a previously created location for a Hadoop Distributed File System cluster. 
+ /// Modifies the following configuration parameters of the Hadoop Distributed File System (HDFS) transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an HDFS cluster. /// /// Parameters: /// - agentArns: The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster. @@ -2112,7 +2344,7 @@ public struct DataSync: AWSService { return try await self.updateLocationHdfs(input, logger: logger) } - /// Modifies some configurations of the Network File System (NFS) transfer location that you're using with DataSync. For more information, see Configuring transfers to or from an NFS file server. + /// Modifies the following configuration parameters of the Network File System (NFS) transfer location that you're using with DataSync. For more information, see Configuring transfers with an NFS file server. @Sendable @inlinable public func updateLocationNfs(_ input: UpdateLocationNfsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationNfsResponse { @@ -2125,7 +2357,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Modifies some configurations of the Network File System (NFS) transfer location that you're using with DataSync. For more information, see Configuring transfers to or from an NFS file server. + /// Modifies the following configuration parameters of the Network File System (NFS) transfer location that you're using with DataSync. For more information, see Configuring transfers with an NFS file server. /// /// Parameters: /// - locationArn: Specifies the Amazon Resource Name (ARN) of the NFS transfer location that you want to update. @@ -2150,7 +2382,7 @@ public struct DataSync: AWSService { return try await self.updateLocationNfs(input, logger: logger) } - /// Updates some parameters of an existing DataSync location for an object storage system. + /// Modifies the following configuration parameters of the object storage transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an object storage system. @Sendable @inlinable public func updateLocationObjectStorage(_ input: UpdateLocationObjectStorageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationObjectStorageResponse { @@ -2163,7 +2395,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Updates some parameters of an existing DataSync location for an object storage system. + /// Modifies the following configuration parameters of the object storage transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an object storage system. /// /// Parameters: /// - accessKey: Specifies the access key (for example, a user name) if credentials are required to authenticate with the object storage server. @@ -2200,7 +2432,45 @@ public struct DataSync: AWSService { return try await self.updateLocationObjectStorage(input, logger: logger) } - /// Updates some of the parameters of a Server Message Block (SMB) file server location that you can use for DataSync transfers. + /// Modifies the following configuration parameters of the Amazon S3 transfer location that you're using with DataSync. 
Before you begin, make sure that you read the following topics: Storage class considerations with Amazon S3 locations Evaluating S3 request costs when using DataSync + @Sendable + @inlinable + public func updateLocationS3(_ input: UpdateLocationS3Request, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationS3Response { + try await self.client.execute( + operation: "UpdateLocationS3", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon S3 transfer location that you're using with DataSync. Before you begin, make sure that you read the following topics: Storage class considerations with Amazon S3 locations Evaluating S3 request costs when using DataSync + /// + /// Parameters: + /// - locationArn: Specifies the Amazon Resource Name (ARN) of the Amazon S3 transfer location that you're updating. + /// - s3Config: + /// - s3StorageClass: Specifies the storage class that you want your objects to use when Amazon S3 is a transfer destination. For buckets in Amazon Web Services Regions, the storage class defaults to STANDARD. For buckets on Outposts, the storage class defaults to OUTPOSTS. For more information, see Storage class considerations with Amazon S3 transfers. + /// - subdirectory: Specifies a prefix in the S3 bucket that DataSync reads from or writes to (depending on whether the bucket is a source or destination location). DataSync can't transfer objects with a prefix that begins with a slash (/) or includes //, /./, or /../ patterns. For example: /photos photos//2006/January photos/./2006/February photos/../2006/March + /// - logger: Logger use during operation + @inlinable + public func updateLocationS3( + locationArn: String, + s3Config: S3Config? = nil, + s3StorageClass: S3StorageClass? = nil, + subdirectory: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationS3Response { + let input = UpdateLocationS3Request( + locationArn: locationArn, + s3Config: s3Config, + s3StorageClass: s3StorageClass, + subdirectory: subdirectory + ) + return try await self.updateLocationS3(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Server Message Block (SMB) transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an SMB file server. @Sendable @inlinable public func updateLocationSmb(_ input: UpdateLocationSmbRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationSmbResponse { @@ -2213,7 +2483,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Updates some of the parameters of a Server Message Block (SMB) file server location that you can use for DataSync transfers. + /// Modifies the following configuration parameters of the Server Message Block (SMB) transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an SMB file server. /// /// Parameters: /// - agentArns: Specifies the DataSync agent (or agents) that can connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN). 
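A minimal usage sketch of the new updateLocationS3 convenience method added above, before the shapes diff below. This is illustrative only and not part of the diff: the client setup, region, location ARN, and the .standardIa case name (assuming Soto's usual STANDARD_IA mapping) are placeholders.

import SotoDataSync

// Sketch: repoint an existing S3 transfer location at a new prefix and
// storage class. The ARN below is a placeholder, not a value from this diff.
let client = AWSClient()
let dataSync = DataSync(client: client, region: .useast1)

_ = try await dataSync.updateLocationS3(
    locationArn: "arn:aws:datasync:us-east-1:111122223333:location/loc-EXAMPLE11111",
    s3StorageClass: .standardIa,
    // Per the doc comment above, prefixes must not begin with "/" or
    // contain "//", "/./", or "/../" patterns.
    subdirectory: "photos/2006"
)
try await client.shutdown()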
diff --git a/Sources/Soto/Services/DataSync/DataSync_shapes.swift b/Sources/Soto/Services/DataSync/DataSync_shapes.swift index 7ea63c27d8..223ec4f0f7 100644 --- a/Sources/Soto/Services/DataSync/DataSync_shapes.swift +++ b/Sources/Soto/Services/DataSync/DataSync_shapes.swift @@ -690,7 +690,7 @@ extension DataSync { public let fileSystemAccessRoleArn: String? /// Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system. If you specify an access point using AccessPointArn or an IAM role using FileSystemAccessRoleArn, you must set this parameter to TLS1_2. public let inTransitEncryption: EfsInTransitEncryption? - /// Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system. By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder). + /// Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location). By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder). public let subdirectory: String? /// Specifies the key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location. public let tags: [TagListEntry]? @@ -748,13 +748,13 @@ extension DataSync { } public struct CreateLocationFsxLustreRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) for the FSx for Lustre file system. + /// Specifies the Amazon Resource Name (ARN) of the FSx for Lustre file system. public let fsxFilesystemArn: String - /// The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for Lustre file system. + /// Specifies the Amazon Resource Names (ARNs) of up to five security groups that provide access to your FSx for Lustre file system. The security groups must be able to access the file system's ports. The file system must also allow access from the security groups. For information about file system access, see the Amazon FSx for Lustre User Guide. public let securityGroupArns: [String] - /// A subdirectory in the location's path. This subdirectory in the FSx for Lustre file system is used to read data from the FSx for Lustre source location or write data to the FSx for Lustre destination. + /// Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories. When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory (/). public let subdirectory: String? - /// The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.
+ /// Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location. public let tags: [TagListEntry]? @inlinable @@ -791,7 +791,7 @@ extension DataSync { } public struct CreateLocationFsxLustreResponse: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the FSx for Lustre file system location that's created. + /// The Amazon Resource Name (ARN) of the FSx for Lustre file system location that you created. public let locationArn: String? @inlinable @@ -810,7 +810,7 @@ extension DataSync { public let securityGroupArns: [String] /// Specifies the ARN of the storage virtual machine (SVM) in your file system where you want to copy data to or from. public let storageVirtualMachineArn: String - /// Specifies a path to the file share in the SVM where you'll copy your data. You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be /vol1, /vol1/tree1, or /share1. Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide. + /// Specifies a path to the file share in the SVM where you want to transfer data to or from. You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be /vol1, /vol1/tree1, or /share1. Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide. public let subdirectory: String? /// Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location. public let tags: [TagListEntry]? @@ -928,7 +928,7 @@ extension DataSync { } public struct CreateLocationFsxWindowsRequest: AWSEncodableShape { - /// Specifies the name of the Microsoft Active Directory domain that the FSx for Windows File Server file system belongs to. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system. + /// Specifies the name of the Windows domain that the FSx for Windows File Server file system belongs to. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system. public let domain: String? /// Specifies the Amazon Resource Name (ARN) for the FSx for Windows File Server file system. public let fsxFilesystemArn: String @@ -2886,7 +2886,7 @@ extension DataSync { } public struct FsxProtocolSmb: AWSEncodableShape & AWSDecodableShape { - /// Specifies the fully qualified domain name (FQDN) of the Microsoft Active Directory that your storage virtual machine (SVM) belongs to. If you have multiple domains in your environment, configuring this setting makes sure that DataSync connects to the right SVM. + /// Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM.
public let domain: String? public let mountOptions: SmbMountOptions? /// Specifies the password of a user who has permission to access your SVM. @@ -2919,6 +2919,61 @@ extension DataSync { } } + public struct FsxUpdateProtocol: AWSEncodableShape { + public let nfs: FsxProtocolNfs? + /// Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your FSx for ONTAP file system's storage virtual machine (SVM). + public let smb: FsxUpdateProtocolSmb? + + @inlinable + public init(nfs: FsxProtocolNfs? = nil, smb: FsxUpdateProtocolSmb? = nil) { + self.nfs = nfs + self.smb = smb + } + + public func validate(name: String) throws { + try self.smb?.validate(name: "\(name).smb") + } + + private enum CodingKeys: String, CodingKey { + case nfs = "NFS" + case smb = "SMB" + } + } + + public struct FsxUpdateProtocolSmb: AWSEncodableShape { + /// Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM. + public let domain: String? + public let mountOptions: SmbMountOptions? + /// Specifies the password of a user who has permission to access your SVM. + public let password: String? + /// Specifies a user that can mount and access the files, folders, and metadata in your SVM. For information about choosing a user with the right level of access for your transfer, see Using the SMB protocol. + public let user: String? + + @inlinable + public init(domain: String? = nil, mountOptions: SmbMountOptions? = nil, password: String? = nil, user: String? = nil) { + self.domain = domain + self.mountOptions = mountOptions + self.password = password + self.user = user + } + + public func validate(name: String) throws { + try self.validate(self.domain, name: "domain", parent: name, max: 253) + try self.validate(self.domain, name: "domain", parent: name, pattern: "^([A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252})?$") + try self.validate(self.password, name: "password", parent: name, max: 104) + try self.validate(self.password, name: "password", parent: name, pattern: "^.{0,104}$") + try self.validate(self.user, name: "user", parent: name, max: 104) + try self.validate(self.user, name: "user", parent: name, pattern: "^[^\\x5B\\x5D\\\\/:;|=,+*?]{1,104}$") + } + + private enum CodingKeys: String, CodingKey { + case domain = "Domain" + case mountOptions = "MountOptions" + case password = "Password" + case user = "User" + } + } + public struct GenerateRecommendationsRequest: AWSEncodableShape { /// Specifies the Amazon Resource Name (ARN) of the discovery job that collects information about your on-premises storage system. public let discoveryJobArn: String @@ -4814,6 +4869,194 @@ extension DataSync { public init() {} } + public struct UpdateLocationEfsRequest: AWSEncodableShape { + /// Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system. For more information, see Accessing restricted Amazon EFS file systems. + public let accessPointArn: String? + /// Specifies an Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system. For information on creating this role, see Creating a DataSync IAM role for Amazon EFS file system access. + public let fileSystemAccessRoleArn: String? 
+ /// Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system. If you specify an access point using AccessPointArn or an IAM role using FileSystemAccessRoleArn, you must set this parameter to TLS1_2. + public let inTransitEncryption: EfsInTransitEncryption? + /// Specifies the Amazon Resource Name (ARN) of the Amazon EFS transfer location that you're updating. + public let locationArn: String + /// Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location). By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder). + public let subdirectory: String? + + @inlinable + public init(accessPointArn: String? = nil, fileSystemAccessRoleArn: String? = nil, inTransitEncryption: EfsInTransitEncryption? = nil, locationArn: String, subdirectory: String? = nil) { + self.accessPointArn = accessPointArn + self.fileSystemAccessRoleArn = fileSystemAccessRoleArn + self.inTransitEncryption = inTransitEncryption + self.locationArn = locationArn + self.subdirectory = subdirectory + } + + public func validate(name: String) throws { + try self.validate(self.accessPointArn, name: "accessPointArn", parent: name, max: 128) + try self.validate(self.accessPointArn, name: "accessPointArn", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):elasticfilesystem:[a-z\\-0-9]+:[0-9]{12}:access-point/fsap-[0-9a-f]{8,40}$)|(^$)$") + try self.validate(self.fileSystemAccessRoleArn, name: "fileSystemAccessRoleArn", parent: name, max: 2048) + try self.validate(self.fileSystemAccessRoleArn, name: "fileSystemAccessRoleArn", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):iam::[0-9]{12}:role/.*$)|(^$)$") + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 4096) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]*$") + } + + private enum CodingKeys: String, CodingKey { + case accessPointArn = "AccessPointArn" + case fileSystemAccessRoleArn = "FileSystemAccessRoleArn" + case inTransitEncryption = "InTransitEncryption" + case locationArn = "LocationArn" + case subdirectory = "Subdirectory" + } + } + + public struct UpdateLocationEfsResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateLocationFsxLustreRequest: AWSEncodableShape { + /// Specifies the Amazon Resource Name (ARN) of the FSx for Lustre transfer location that you're updating. + public let locationArn: String + /// Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories. When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory (/). + public let subdirectory: String? + + @inlinable + public init(locationArn: String, subdirectory: String? 
= nil) { + self.locationArn = locationArn + self.subdirectory = subdirectory + } + + public func validate(name: String) throws { + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 4096) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$") + } + + private enum CodingKeys: String, CodingKey { + case locationArn = "LocationArn" + case subdirectory = "Subdirectory" + } + } + + public struct UpdateLocationFsxLustreResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateLocationFsxOntapRequest: AWSEncodableShape { + /// Specifies the Amazon Resource Name (ARN) of the FSx for ONTAP transfer location that you're updating. + public let locationArn: String + /// Specifies the data transfer protocol that DataSync uses to access your Amazon FSx file system. + public let `protocol`: FsxUpdateProtocol? + /// Specifies a path to the file share in the storage virtual machine (SVM) where you want to transfer data to or from. You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be /vol1, /vol1/tree1, or /share1. Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide. + public let subdirectory: String? + + @inlinable + public init(locationArn: String, protocol: FsxUpdateProtocol? = nil, subdirectory: String? = nil) { + self.locationArn = locationArn + self.`protocol` = `protocol` + self.subdirectory = subdirectory + } + + public func validate(name: String) throws { + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.`protocol`?.validate(name: "\(name).`protocol`") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 255) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,255}$") + } + + private enum CodingKeys: String, CodingKey { + case locationArn = "LocationArn" + case `protocol` = "Protocol" + case subdirectory = "Subdirectory" + } + } + + public struct UpdateLocationFsxOntapResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateLocationFsxOpenZfsRequest: AWSEncodableShape { + /// Specifies the Amazon Resource Name (ARN) of the FSx for OpenZFS transfer location that you're updating. + public let locationArn: String + public let `protocol`: FsxProtocol? + /// Specifies a subdirectory in the location's path that must begin with /fsx. DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location). + public let subdirectory: String? + + @inlinable + public init(locationArn: String, protocol: FsxProtocol? = nil, subdirectory: String? 
= nil) { + self.locationArn = locationArn + self.`protocol` = `protocol` + self.subdirectory = subdirectory + } + + public func validate(name: String) throws { + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.`protocol`?.validate(name: "\(name).`protocol`") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 4096) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$") + } + + private enum CodingKeys: String, CodingKey { + case locationArn = "LocationArn" + case `protocol` = "Protocol" + case subdirectory = "Subdirectory" + } + } + + public struct UpdateLocationFsxOpenZfsResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateLocationFsxWindowsRequest: AWSEncodableShape { + /// Specifies the name of the Windows domain that your FSx for Windows File Server file system belongs to. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system. + public let domain: String? + /// Specifies the ARN of the FSx for Windows File Server transfer location that you're updating. + public let locationArn: String + /// Specifies the password of the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system. + public let password: String? + /// Specifies a mount path for your file system using forward slashes. DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location). + public let subdirectory: String? + /// Specifies the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system. For information about choosing a user with the right level of access for your transfer, see required permissions for FSx for Windows File Server locations. + public let user: String? + + @inlinable + public init(domain: String? = nil, locationArn: String, password: String? = nil, subdirectory: String? = nil, user: String? 
= nil) { + self.domain = domain + self.locationArn = locationArn + self.password = password + self.subdirectory = subdirectory + self.user = user + } + + public func validate(name: String) throws { + try self.validate(self.domain, name: "domain", parent: name, max: 253) + try self.validate(self.domain, name: "domain", parent: name, pattern: "^([A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252})?$") + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.validate(self.password, name: "password", parent: name, max: 104) + try self.validate(self.password, name: "password", parent: name, pattern: "^.{0,104}$") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 4096) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$") + try self.validate(self.user, name: "user", parent: name, max: 104) + try self.validate(self.user, name: "user", parent: name, pattern: "^[^\\x5B\\x5D\\\\/:;|=,+*?]{1,104}$") + } + + private enum CodingKeys: String, CodingKey { + case domain = "Domain" + case locationArn = "LocationArn" + case password = "Password" + case subdirectory = "Subdirectory" + case user = "User" + } + } + + public struct UpdateLocationFsxWindowsResponse: AWSDecodableShape { + public init() {} + } + public struct UpdateLocationHdfsRequest: AWSEncodableShape { /// The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster. public let agentArns: [String]? @@ -5014,6 +5257,43 @@ extension DataSync { public init() {} } + public struct UpdateLocationS3Request: AWSEncodableShape { + /// Specifies the Amazon Resource Name (ARN) of the Amazon S3 transfer location that you're updating. + public let locationArn: String + public let s3Config: S3Config? + /// Specifies the storage class that you want your objects to use when Amazon S3 is a transfer destination. For buckets in Amazon Web Services Regions, the storage class defaults to STANDARD. For buckets on Outposts, the storage class defaults to OUTPOSTS. For more information, see Storage class considerations with Amazon S3 transfers. + public let s3StorageClass: S3StorageClass? + /// Specifies a prefix in the S3 bucket that DataSync reads from or writes to (depending on whether the bucket is a source or destination location). DataSync can't transfer objects with a prefix that begins with a slash (/) or includes //, /./, or /../ patterns. For example: /photos photos//2006/January photos/./2006/February photos/../2006/March + public let subdirectory: String? + + @inlinable + public init(locationArn: String, s3Config: S3Config? = nil, s3StorageClass: S3StorageClass? = nil, subdirectory: String? 
= nil) { + self.locationArn = locationArn + self.s3Config = s3Config + self.s3StorageClass = s3StorageClass + self.subdirectory = subdirectory + } + + public func validate(name: String) throws { + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.s3Config?.validate(name: "\(name).s3Config") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 4096) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]*$") + } + + private enum CodingKeys: String, CodingKey { + case locationArn = "LocationArn" + case s3Config = "S3Config" + case s3StorageClass = "S3StorageClass" + case subdirectory = "Subdirectory" + } + } + + public struct UpdateLocationS3Response: AWSDecodableShape { + public init() {} + } + public struct UpdateLocationSmbRequest: AWSEncodableShape { /// Specifies the DataSync agent (or agents) that can connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN). public let agentArns: [String]? diff --git a/Sources/Soto/Services/DataZone/DataZone_api.swift b/Sources/Soto/Services/DataZone/DataZone_api.swift index fd0f5f86fc..a6fe59b12e 100644 --- a/Sources/Soto/Services/DataZone/DataZone_api.swift +++ b/Sources/Soto/Services/DataZone/DataZone_api.swift @@ -87,6 +87,7 @@ public struct DataZone: AWSService { "ap-southeast-3": "datazone.ap-southeast-3.api.aws", "ap-southeast-4": "datazone.ap-southeast-4.api.aws", "ap-southeast-5": "datazone.ap-southeast-5.api.aws", + "ap-southeast-7": "datazone.ap-southeast-7.api.aws", "ca-central-1": "datazone.ca-central-1.api.aws", "ca-west-1": "datazone.ca-west-1.api.aws", "cn-north-1": "datazone.cn-north-1.api.amazonwebservices.com.cn", @@ -122,6 +123,7 @@ public struct DataZone: AWSService { "ap-southeast-3": "datazone-fips.ap-southeast-3.api.aws", "ap-southeast-4": "datazone-fips.ap-southeast-4.api.aws", "ap-southeast-5": "datazone-fips.ap-southeast-5.api.aws", + "ap-southeast-7": "datazone-fips.ap-southeast-7.api.aws", "ca-central-1": "datazone-fips.ca-central-1.amazonaws.com", "ca-west-1": "datazone-fips.ca-west-1.api.aws", "cn-north-1": "datazone-fips.cn-north-1.api.amazonwebservices.com.cn", diff --git a/Sources/Soto/Services/Detective/Detective_api.swift b/Sources/Soto/Services/Detective/Detective_api.swift index a7b85e98ba..8fdedf9ba7 100644 --- a/Sources/Soto/Services/Detective/Detective_api.swift +++ b/Sources/Soto/Services/Detective/Detective_api.swift @@ -915,7 +915,7 @@ public struct Detective: AWSService { return try await self.untagResource(input, logger: logger) } - /// Starts a data source packages for the behavior graph. + /// Starts a data source package for the Detective behavior graph. @Sendable @inlinable public func updateDatasourcePackages(_ input: UpdateDatasourcePackagesRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -928,10 +928,10 @@ public struct Detective: AWSService { logger: logger ) } - /// Starts a data source packages for the behavior graph. + /// Starts a data source package for the Detective behavior graph. /// /// Parameters: - /// - datasourcePackages: The data source package start for the behavior graph. + /// - datasourcePackages: The data source package to start for the behavior graph. /// - graphArn: The ARN of the behavior graph. 
/// - logger: Logger use during operation @inlinable diff --git a/Sources/Soto/Services/Detective/Detective_shapes.swift b/Sources/Soto/Services/Detective/Detective_shapes.swift index c8fa5e7fb8..1fb3091739 100644 --- a/Sources/Soto/Services/Detective/Detective_shapes.swift +++ b/Sources/Soto/Services/Detective/Detective_shapes.swift @@ -1784,7 +1784,7 @@ extension Detective { } public struct UpdateDatasourcePackagesRequest: AWSEncodableShape { - /// The data source package start for the behavior graph. + /// The data source package to start for the behavior graph. public let datasourcePackages: [DatasourcePackage] /// The ARN of the behavior graph. public let graphArn: String diff --git a/Sources/Soto/Services/DocDB/DocDB_api.swift b/Sources/Soto/Services/DocDB/DocDB_api.swift index 3d955e0eeb..347a682e4a 100644 --- a/Sources/Soto/Services/DocDB/DocDB_api.swift +++ b/Sources/Soto/Services/DocDB/DocDB_api.swift @@ -305,8 +305,10 @@ public struct DocDB: AWSService { /// - engineVersion: The version number of the database engine to use. The --engine-version will default to the latest major engine version. For production workloads, we recommend explicitly declaring this parameter with the intended major engine version. /// - globalClusterIdentifier: The cluster identifier of the new global cluster. /// - kmsKeyId: The KMS key identifier for an encrypted cluster. The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a cluster using the same Amazon Web Services account that owns the KMS encryption key that is used to encrypt the new cluster, you can use the KMS key alias instead of the ARN for the KMS encryption key. If an encryption key is not specified in KmsKeyId: If the StorageEncrypted parameter is true, Amazon DocumentDB uses your default encryption key. KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Regions. + /// - manageMasterUserPassword: Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. Constraint: You can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. /// - masterUsername: The name of the master user for the cluster. Constraints: Must be from 1 to 63 letters or numbers. The first character must be a letter. Cannot be a reserved word for the chosen database engine. /// - masterUserPassword: The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ("), or the "at" symbol (@). Constraints: Must contain from 8 to 100 characters. + /// - masterUserSecretKmsKeyId: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. 
If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. /// - port: The port number on which the instances in the cluster accept connections. /// - preferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes. /// - preferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. @@ -329,8 +331,10 @@ public struct DocDB: AWSService { engineVersion: String? = nil, globalClusterIdentifier: String? = nil, kmsKeyId: String? = nil, + manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, + masterUserSecretKmsKeyId: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, @@ -353,8 +357,10 @@ public struct DocDB: AWSService { engineVersion: engineVersion, globalClusterIdentifier: globalClusterIdentifier, kmsKeyId: kmsKeyId, + manageMasterUserPassword: manageMasterUserPassword, masterUsername: masterUsername, masterUserPassword: masterUserPassword, + masterUserSecretKmsKeyId: masterUserSecretKmsKeyId, port: port, preferredBackupWindow: preferredBackupWindow, preferredMaintenanceWindow: preferredMaintenanceWindow, @@ -1622,11 +1628,14 @@ public struct DocDB: AWSService { /// - dbClusterParameterGroupName: The name of the cluster parameter group to use for the cluster. /// - deletionProtection: Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted. /// - engineVersion: The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled. To list all of the available engine versions for Amazon DocumentDB use the following command: aws docdb describe-db-engine-versions --engine docdb --query "DBEngineVersions[].EngineVersion" + /// - manageMasterUserPassword: Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. If the cluster doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. In this case, you can't specify MasterUserPassword. 
If the cluster already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, Amazon DocumentDB deletes the secret and uses the new password for the master user specified by MasterUserPassword. /// - masterUserPassword: The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ("), or the "at" symbol (@). Constraints: Must contain from 8 to 100 characters. + /// - masterUserSecretKmsKeyId: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if both of the following conditions are met: The cluster doesn't manage the master user password in Amazon Web Services Secrets Manager. If the cluster already manages the master user password in Amazon Web Services Secrets Manager, you can't change the KMS key that is used to encrypt the secret. You are enabling ManageMasterUserPassword to manage the master user password in Amazon Web Services Secrets Manager. If you are turning on ManageMasterUserPassword and don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. /// - newDBClusterIdentifier: The new cluster identifier for the cluster when renaming a cluster. This value is stored as a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. The first character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. Example: my-cluster2 /// - port: The port number on which the cluster accepts connections. Constraints: Must be a value from 1150 to 65535. Default: The same port as the original cluster. /// - preferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes. /// - preferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. + /// - rotateMasterUserPassword: Specifies whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password. 
This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the cluster. The secret value contains the updated password. Constraint: You must apply the change immediately when rotating the master user password. /// - storageType: The storage type to associate with the DB cluster. For information on storage types for Amazon DocumentDB clusters, see Cluster storage configurations in the Amazon DocumentDB Developer Guide. Valid values for storage type - standard | iopt1 Default value is standard /// - vpcSecurityGroupIds: A list of virtual private cloud (VPC) security groups that the cluster will belong to. /// - logger: Logger use during operation @@ -1640,11 +1649,14 @@ public struct DocDB: AWSService { dbClusterParameterGroupName: String? = nil, deletionProtection: Bool? = nil, engineVersion: String? = nil, + manageMasterUserPassword: Bool? = nil, masterUserPassword: String? = nil, + masterUserSecretKmsKeyId: String? = nil, newDBClusterIdentifier: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, + rotateMasterUserPassword: Bool? = nil, storageType: String? = nil, vpcSecurityGroupIds: [String]? = nil, logger: Logger = AWSClient.loggingDisabled @@ -1658,11 +1670,14 @@ public struct DocDB: AWSService { dbClusterParameterGroupName: dbClusterParameterGroupName, deletionProtection: deletionProtection, engineVersion: engineVersion, + manageMasterUserPassword: manageMasterUserPassword, masterUserPassword: masterUserPassword, + masterUserSecretKmsKeyId: masterUserSecretKmsKeyId, newDBClusterIdentifier: newDBClusterIdentifier, port: port, preferredBackupWindow: preferredBackupWindow, preferredMaintenanceWindow: preferredMaintenanceWindow, + rotateMasterUserPassword: rotateMasterUserPassword, storageType: storageType, vpcSecurityGroupIds: vpcSecurityGroupIds ) diff --git a/Sources/Soto/Services/DocDB/DocDB_shapes.swift b/Sources/Soto/Services/DocDB/DocDB_shapes.swift index 0640dd8123..b872907a3a 100644 --- a/Sources/Soto/Services/DocDB/DocDB_shapes.swift +++ b/Sources/Soto/Services/DocDB/DocDB_shapes.swift @@ -238,6 +238,28 @@ extension DocDB { } } + public struct ClusterMasterUserSecret: AWSDecodableShape { + /// The Amazon Web Services KMS key identifier that is used to encrypt the secret. + public let kmsKeyId: String? + /// The Amazon Resource Name (ARN) of the secret. + public let secretArn: String? + /// The status of the secret. The possible status values include the following: creating - The secret is being created. active - The secret is available for normal use and rotation. rotating - The secret is being rotated. impaired - The secret can be used to access database credentials, but it can't be rotated. A secret might have this status if, for example, permissions are changed so that Amazon DocumentDB can no longer access either the secret or the KMS key for the secret. When a secret has this status, you can correct the condition that caused the status. Alternatively, modify the instance to turn off automatic management of database credentials, and then modify the instance again to turn on automatic management of database credentials. + public let secretStatus: String? + + @inlinable + public init(kmsKeyId: String? = nil, secretArn: String? = nil, secretStatus: String? 
= nil) { + self.kmsKeyId = kmsKeyId + self.secretArn = secretArn + self.secretStatus = secretStatus + } + + private enum CodingKeys: String, CodingKey { + case kmsKeyId = "KmsKeyId" + case secretArn = "SecretArn" + case secretStatus = "SecretStatus" + } + } + public struct CopyDBClusterParameterGroupMessage: AWSEncodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } @@ -359,10 +381,14 @@ extension DocDB { public let globalClusterIdentifier: String? /// The KMS key identifier for an encrypted cluster. The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a cluster using the same Amazon Web Services account that owns the KMS encryption key that is used to encrypt the new cluster, you can use the KMS key alias instead of the ARN for the KMS encryption key. If an encryption key is not specified in KmsKeyId: If the StorageEncrypted parameter is true, Amazon DocumentDB uses your default encryption key. KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Regions. public let kmsKeyId: String? + /// Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. Constraint: You can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. + public let manageMasterUserPassword: Bool? /// The name of the master user for the cluster. Constraints: Must be from 1 to 63 letters or numbers. The first character must be a letter. Cannot be a reserved word for the chosen database engine. public let masterUsername: String? /// The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ("), or the "at" symbol (@). Constraints: Must contain from 8 to 100 characters. public let masterUserPassword: String? + /// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. + public let masterUserSecretKmsKeyId: String? /// The port number on which the instances in the cluster accept connections. public let port: Int? /// The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes. 
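The new Secrets Manager fields on CreateDBClusterMessage and ModifyDBClusterMessage are reachable through the convenience methods shown in DocDB_api.swift above. A hedged sketch, assuming the same kind of AWSClient setup as the DataSync example; the cluster identifier and username are placeholders, and the response access follows the ClusterMasterUserSecret shape added in this diff.

import SotoDocDB

let docDB = DocDB(client: client, region: .useast1)

// Create a cluster whose master password is generated and owned by
// Secrets Manager. MasterUserPassword must be omitted in this mode, and
// without MasterUserSecretKmsKeyId the aws/secretsmanager key is used.
let created = try await docDB.createDBCluster(
    dbClusterIdentifier: "example-cluster",                // placeholder
    engine: "docdb",
    manageMasterUserPassword: true,
    masterUsername: "exampleadmin"                         // placeholder
)
// The generated secret surfaces on the cluster description through the
// new ClusterMasterUserSecret shape.
let secretArn = created.dbCluster?.masterUserSecret?.secretArn

// Rotating the managed secret later requires applying the change
// immediately, per the rotateMasterUserPassword constraint above.
_ = try await docDB.modifyDBCluster(
    applyImmediately: true,
    dbClusterIdentifier: "example-cluster",
    rotateMasterUserPassword: true
)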
@@ -383,7 +409,7 @@ extension DocDB { public var vpcSecurityGroupIds: [String]? @inlinable - public init(availabilityZones: [String]? = nil, backupRetentionPeriod: Int? = nil, dbClusterIdentifier: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, enableCloudwatchLogsExports: [String]? = nil, engine: String? = nil, engineVersion: String? = nil, globalClusterIdentifier: String? = nil, kmsKeyId: String? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, preSignedUrl: String? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(availabilityZones: [String]? = nil, backupRetentionPeriod: Int? = nil, dbClusterIdentifier: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, enableCloudwatchLogsExports: [String]? = nil, engine: String? = nil, engineVersion: String? = nil, globalClusterIdentifier: String? = nil, kmsKeyId: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, preSignedUrl: String? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { self.availabilityZones = availabilityZones self.backupRetentionPeriod = backupRetentionPeriod self.dbClusterIdentifier = dbClusterIdentifier @@ -395,8 +421,10 @@ extension DocDB { self.engineVersion = engineVersion self.globalClusterIdentifier = globalClusterIdentifier self.kmsKeyId = kmsKeyId + self.manageMasterUserPassword = manageMasterUserPassword self.masterUsername = masterUsername self.masterUserPassword = masterUserPassword + self.masterUserSecretKmsKeyId = masterUserSecretKmsKeyId self.port = port self.preferredBackupWindow = preferredBackupWindow self.preferredMaintenanceWindow = preferredMaintenanceWindow @@ -425,8 +453,10 @@ extension DocDB { case engineVersion = "EngineVersion" case globalClusterIdentifier = "GlobalClusterIdentifier" case kmsKeyId = "KmsKeyId" + case manageMasterUserPassword = "ManageMasterUserPassword" case masterUsername = "MasterUsername" case masterUserPassword = "MasterUserPassword" + case masterUserSecretKmsKeyId = "MasterUserSecretKmsKeyId" case port = "Port" case preferredBackupWindow = "PreferredBackupWindow" case preferredMaintenanceWindow = "PreferredMaintenanceWindow" @@ -821,6 +851,8 @@ extension DocDB { public let latestRestorableTime: Date? /// Contains the master user name for the cluster. public let masterUsername: String? + /// The secret managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the master user password. + public let masterUserSecret: ClusterMasterUserSecret? /// Specifies whether the cluster has instances in multiple Availability Zones. public let multiAZ: Bool? /// Specifies the progress of the operation as a percentage. @@ -849,7 +881,7 @@ extension DocDB { public var vpcSecurityGroups: [VpcSecurityGroupMembership]? @inlinable - public init(associatedRoles: [DBClusterRole]? = nil, availabilityZones: [String]? = nil, backupRetentionPeriod: Int? = nil, cloneGroupId: String? = nil, clusterCreateTime: Date? = nil, dbClusterArn: String? 
= nil, dbClusterIdentifier: String? = nil, dbClusterMembers: [DBClusterMember]? = nil, dbClusterParameterGroup: String? = nil, dbClusterResourceId: String? = nil, dbSubnetGroup: String? = nil, deletionProtection: Bool? = nil, earliestRestorableTime: Date? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: String? = nil, engine: String? = nil, engineVersion: String? = nil, hostedZoneId: String? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, masterUsername: String? = nil, multiAZ: Bool? = nil, percentProgress: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, readerEndpoint: String? = nil, readReplicaIdentifiers: [String]? = nil, replicationSourceIdentifier: String? = nil, status: String? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { + public init(associatedRoles: [DBClusterRole]? = nil, availabilityZones: [String]? = nil, backupRetentionPeriod: Int? = nil, cloneGroupId: String? = nil, clusterCreateTime: Date? = nil, dbClusterArn: String? = nil, dbClusterIdentifier: String? = nil, dbClusterMembers: [DBClusterMember]? = nil, dbClusterParameterGroup: String? = nil, dbClusterResourceId: String? = nil, dbSubnetGroup: String? = nil, deletionProtection: Bool? = nil, earliestRestorableTime: Date? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: String? = nil, engine: String? = nil, engineVersion: String? = nil, hostedZoneId: String? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, masterUsername: String? = nil, masterUserSecret: ClusterMasterUserSecret? = nil, multiAZ: Bool? = nil, percentProgress: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, readerEndpoint: String? = nil, readReplicaIdentifiers: [String]? = nil, replicationSourceIdentifier: String? = nil, status: String? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { self.associatedRoles = associatedRoles self.availabilityZones = availabilityZones self.backupRetentionPeriod = backupRetentionPeriod @@ -871,6 +903,7 @@ extension DocDB { self.kmsKeyId = kmsKeyId self.latestRestorableTime = latestRestorableTime self.masterUsername = masterUsername + self.masterUserSecret = masterUserSecret self.multiAZ = multiAZ self.percentProgress = percentProgress self.port = port @@ -907,6 +940,7 @@ extension DocDB { case kmsKeyId = "KmsKeyId" case latestRestorableTime = "LatestRestorableTime" case masterUsername = "MasterUsername" + case masterUserSecret = "MasterUserSecret" case multiAZ = "MultiAZ" case percentProgress = "PercentProgress" case port = "Port" @@ -2716,8 +2750,12 @@ extension DocDB { public let deletionProtection: Bool? /// The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled. To list all of the available engine versions for Amazon DocumentDB use the following command: aws docdb describe-db-engine-versions --engine docdb --query "DBEngineVersions[].EngineVersion" public let engineVersion: String? + /// Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. If the cluster doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. 
In this case, you can't specify MasterUserPassword. If the cluster already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, Amazon DocumentDB deletes the secret and uses the new password for the master user specified by MasterUserPassword. + public let manageMasterUserPassword: Bool? /// The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ("), or the "at" symbol (@). Constraints: Must contain from 8 to 100 characters. public let masterUserPassword: String? + /// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if both of the following conditions are met: The cluster doesn't manage the master user password in Amazon Web Services Secrets Manager. If the cluster already manages the master user password in Amazon Web Services Secrets Manager, you can't change the KMS key that is used to encrypt the secret. You are enabling ManageMasterUserPassword to manage the master user password in Amazon Web Services Secrets Manager. If you are turning on ManageMasterUserPassword and don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. + public let masterUserSecretKmsKeyId: String? /// The new cluster identifier for the cluster when renaming a cluster. This value is stored as a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. The first character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. Example: my-cluster2 public let newDBClusterIdentifier: String? /// The port number on which the cluster accepts connections. Constraints: Must be a value from 1150 to 65535. Default: The same port as the original cluster. @@ -2726,6 +2764,8 @@ extension DocDB { public let preferredBackupWindow: String? /// The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. public let preferredMaintenanceWindow: String? + /// Specifies whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password. This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the cluster. The secret value contains the updated password. Constraint: You must apply the change immediately when rotating the master user password. + public let rotateMasterUserPassword: Bool? 
/// The storage type to associate with the DB cluster. For information on storage types for Amazon DocumentDB clusters, see Cluster storage configurations in the Amazon DocumentDB Developer Guide. Valid values for storage type - standard | iopt1 Default value is standard public let storageType: String? /// A list of virtual private cloud (VPC) security groups that the cluster will belong to. @@ -2733,7 +2773,7 @@ extension DocDB { public var vpcSecurityGroupIds: [String]? @inlinable - public init(allowMajorVersionUpgrade: Bool? = nil, applyImmediately: Bool? = nil, backupRetentionPeriod: Int? = nil, cloudwatchLogsExportConfiguration: CloudwatchLogsExportConfiguration? = nil, dbClusterIdentifier: String? = nil, dbClusterParameterGroupName: String? = nil, deletionProtection: Bool? = nil, engineVersion: String? = nil, masterUserPassword: String? = nil, newDBClusterIdentifier: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, storageType: String? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allowMajorVersionUpgrade: Bool? = nil, applyImmediately: Bool? = nil, backupRetentionPeriod: Int? = nil, cloudwatchLogsExportConfiguration: CloudwatchLogsExportConfiguration? = nil, dbClusterIdentifier: String? = nil, dbClusterParameterGroupName: String? = nil, deletionProtection: Bool? = nil, engineVersion: String? = nil, manageMasterUserPassword: Bool? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, newDBClusterIdentifier: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, rotateMasterUserPassword: Bool? = nil, storageType: String? = nil, vpcSecurityGroupIds: [String]? = nil) { self.allowMajorVersionUpgrade = allowMajorVersionUpgrade self.applyImmediately = applyImmediately self.backupRetentionPeriod = backupRetentionPeriod @@ -2742,11 +2782,14 @@ extension DocDB { self.dbClusterParameterGroupName = dbClusterParameterGroupName self.deletionProtection = deletionProtection self.engineVersion = engineVersion + self.manageMasterUserPassword = manageMasterUserPassword self.masterUserPassword = masterUserPassword + self.masterUserSecretKmsKeyId = masterUserSecretKmsKeyId self.newDBClusterIdentifier = newDBClusterIdentifier self.port = port self.preferredBackupWindow = preferredBackupWindow self.preferredMaintenanceWindow = preferredMaintenanceWindow + self.rotateMasterUserPassword = rotateMasterUserPassword self.storageType = storageType self.vpcSecurityGroupIds = vpcSecurityGroupIds } @@ -2760,11 +2803,14 @@ extension DocDB { case dbClusterParameterGroupName = "DBClusterParameterGroupName" case deletionProtection = "DeletionProtection" case engineVersion = "EngineVersion" + case manageMasterUserPassword = "ManageMasterUserPassword" case masterUserPassword = "MasterUserPassword" + case masterUserSecretKmsKeyId = "MasterUserSecretKmsKeyId" case newDBClusterIdentifier = "NewDBClusterIdentifier" case port = "Port" case preferredBackupWindow = "PreferredBackupWindow" case preferredMaintenanceWindow = "PreferredMaintenanceWindow" + case rotateMasterUserPassword = "RotateMasterUserPassword" case storageType = "StorageType" case vpcSecurityGroupIds = "VpcSecurityGroupIds" } diff --git a/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift b/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift index c415905734..cf85b367f5 100644 --- a/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift +++ b/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift @@ -536,7 
+536,7 @@ public struct DynamoDB: AWSService { return try await self.describeBackup(input, logger: logger) } - /// Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED. After continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days. You can call DescribeContinuousBackups at a maximum rate of 10 times per second. + /// Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED. After continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days. You can call DescribeContinuousBackups at a maximum rate of 10 times per second. @Sendable @inlinable public func describeContinuousBackups(_ input: DescribeContinuousBackupsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeContinuousBackupsOutput { @@ -551,7 +551,7 @@ public struct DynamoDB: AWSService { logger: logger ) } - /// Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED. After continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days. You can call DescribeContinuousBackups at a maximum rate of 10 times per second. + /// Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED. After continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days. You can call DescribeContinuousBackups at a maximum rate of 10 times per second. /// /// Parameters: /// - tableName: Name of the table for which the customer wants to check the continuous backups and point in time recovery settings. You can also provide the Amazon Resource Name (ARN) of the table in this parameter. 
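Since the doc-comment changes above all concern the point in time recovery window, a small read-only sketch may help. The table name is a placeholder and the client setup is assumed as in the earlier examples.

import SotoDynamoDB

let dynamoDB = DynamoDB(client: client, region: .useast1)

// Check whether PITR is enabled and what restore window applies.
let backups = try await dynamoDB.describeContinuousBackups(tableName: "ExampleTable")
if let pitr = backups.continuousBackupsDescription?.pointInTimeRecoveryDescription {
    print("PITR status:", pitr.pointInTimeRecoveryStatus?.rawValue ?? "UNKNOWN")
    if let earliest = pitr.earliestRestorableDateTime,
       let latest = pitr.latestRestorableDateTime {
        print("Restorable from", earliest, "to", latest)
    }
}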
@@ -1722,7 +1722,7 @@ public struct DynamoDB: AWSService {
        return try await self.restoreTableFromBackup(input, logger: logger)
    }

-    /// Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account. When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table. Along with data, the following are also included on the new restored table using point in time recovery: Global secondary indexes (GSIs) Local secondary indexes (LSIs) Provisioned read and write capacity Encryption settings All these settings come from the current settings of the source table at the time of restore. You must manually set up the following on the restored table: Auto scaling policies IAM policies Amazon CloudWatch metrics and alarms Tags Stream settings Time to Live (TTL) settings Point in time recovery settings
+    /// Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account. When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table. Along with data, the following are also included on the new restored table using point in time recovery: Global secondary indexes (GSIs) Local secondary indexes (LSIs) Provisioned read and write capacity Encryption settings All these settings come from the current settings of the source table at the time of restore. You must manually set up the following on the restored table: Auto scaling policies IAM policies Amazon CloudWatch metrics and alarms Tags Stream settings Time to Live (TTL) settings Point in time recovery settings
    @Sendable
    @inlinable
    public func restoreTableToPointInTime(_ input: RestoreTableToPointInTimeInput, logger: Logger = AWSClient.loggingDisabled) async throws -> RestoreTableToPointInTimeOutput {
@@ -1737,7 +1737,7 @@ public struct DynamoDB: AWSService {
            logger: logger
        )
    }
-    /// Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account. When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table. Along with data, the following are also included on the new restored table using point in time recovery: Global secondary indexes (GSIs) Local secondary indexes (LSIs) Provisioned read and write capacity Encryption settings All these settings come from the current settings of the source table at the time of restore. You must manually set up the following on the restored table: Auto scaling policies IAM policies Amazon CloudWatch metrics and alarms Tags Stream settings Time to Live (TTL) settings Point in time recovery settings
+    /// Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account. When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table. Along with data, the following are also included on the new restored table using point in time recovery: Global secondary indexes (GSIs) Local secondary indexes (LSIs) Provisioned read and write capacity Encryption settings All these settings come from the current settings of the source table at the time of restore. You must manually set up the following on the restored table: Auto scaling policies IAM policies Amazon CloudWatch metrics and alarms Tags Stream settings Time to Live (TTL) settings Point in time recovery settings
    ///
    /// Parameters:
    /// - billingModeOverride: The billing mode of the restored table.
@@ -2001,7 +2001,7 @@ public struct DynamoDB: AWSService {
        return try await self.untagResource(input, logger: logger)
    }

-    /// UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED. Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.
+    /// UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED. Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days.
    @Sendable
    @inlinable
    public func updateContinuousBackups(_ input: UpdateContinuousBackupsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateContinuousBackupsOutput {
@@ -2016,7 +2016,7 @@ public struct DynamoDB: AWSService {
            logger: logger
        )
    }
-    /// UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED. Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.
+    /// UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED. Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days.
    ///
    /// Parameters:
    /// - pointInTimeRecoverySpecification: Represents the settings used to enable point in time recovery.
diff --git a/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift b/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift
index f86302bf77..7bd186a004 100644
--- a/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift
+++ b/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift
@@ -1133,7 +1133,7 @@ extension DynamoDB {
    public struct ConditionCheck: AWSEncodableShape {
        /// A condition that must be satisfied in order for a conditional update to succeed. For more information, see Condition expressions in the Amazon DynamoDB Developer Guide.
        public let conditionExpression: String
-        /// One or more substitution tokens for attribute names in an expression. For more information, see Expression attribute names in the Amazon DynamoDB Developer Guide.
+        /// One or more substitution tokens for attribute names in an expression. For more information, see Expression attribute names in the Amazon DynamoDB Developer Guide.
        public let expressionAttributeNames: [String: String]?
        /// One or more values that can be substituted in an expression. For more information, see Condition expressions in the Amazon DynamoDB Developer Guide.
        public let expressionAttributeValues: [String: AttributeValue]?
@@ -4068,32 +4068,45 @@ extension DynamoDB {
        public let latestRestorableDateTime: Date?
        /// The current state of point in time recovery: ENABLED - Point in time recovery is enabled. DISABLED - Point in time recovery is disabled.
        public let pointInTimeRecoveryStatus: PointInTimeRecoveryStatus?
+        /// The number of preceding days for which continuous backups are taken and maintained. Your table data is only recoverable to any point-in-time from within the configured recovery period. This parameter is optional. If no value is provided, the value will default to 35.
+        public let recoveryPeriodInDays: Int?

        @inlinable
-        public init(earliestRestorableDateTime: Date? = nil, latestRestorableDateTime: Date? = nil, pointInTimeRecoveryStatus: PointInTimeRecoveryStatus? = nil) {
+        public init(earliestRestorableDateTime: Date? = nil, latestRestorableDateTime: Date? = nil, pointInTimeRecoveryStatus: PointInTimeRecoveryStatus? = nil, recoveryPeriodInDays: Int? = nil) {
            self.earliestRestorableDateTime = earliestRestorableDateTime
            self.latestRestorableDateTime = latestRestorableDateTime
            self.pointInTimeRecoveryStatus = pointInTimeRecoveryStatus
+            self.recoveryPeriodInDays = recoveryPeriodInDays
        }

        private enum CodingKeys: String, CodingKey {
            case earliestRestorableDateTime = "EarliestRestorableDateTime"
            case latestRestorableDateTime = "LatestRestorableDateTime"
            case pointInTimeRecoveryStatus = "PointInTimeRecoveryStatus"
+            case recoveryPeriodInDays = "RecoveryPeriodInDays"
        }
    }

    public struct PointInTimeRecoverySpecification: AWSEncodableShape {
        /// Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
        public let pointInTimeRecoveryEnabled: Bool
+        /// The number of preceding days for which continuous backups are taken and maintained. Your table data is only recoverable to any point-in-time from within the configured recovery period. This parameter is optional. If no value is provided, the value will default to 35.
+        public let recoveryPeriodInDays: Int?

        @inlinable
-        public init(pointInTimeRecoveryEnabled: Bool) {
+        public init(pointInTimeRecoveryEnabled: Bool, recoveryPeriodInDays: Int? = nil) {
            self.pointInTimeRecoveryEnabled = pointInTimeRecoveryEnabled
+            self.recoveryPeriodInDays = recoveryPeriodInDays
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.recoveryPeriodInDays, name: "recoveryPeriodInDays", parent: name, max: 35)
+            try self.validate(self.recoveryPeriodInDays, name: "recoveryPeriodInDays", parent: name, min: 1)
        }

        private enum CodingKeys: String, CodingKey {
            case pointInTimeRecoveryEnabled = "PointInTimeRecoveryEnabled"
+            case recoveryPeriodInDays = "RecoveryPeriodInDays"
        }
    }
@@ -5993,6 +6006,7 @@ extension DynamoDB {
        }

        public func validate(name: String) throws {
+            try self.pointInTimeRecoverySpecification.validate(name: "\(name).pointInTimeRecoverySpecification")
            try self.validate(self.tableName, name: "tableName", parent: name, max: 1024)
            try self.validate(self.tableName, name: "tableName", parent: name, min: 1)
        }
diff --git a/Sources/Soto/Services/EC2/EC2_api.swift b/Sources/Soto/Services/EC2/EC2_api.swift
index 9305ff6f95..e670be6cfb 100644
--- a/Sources/Soto/Services/EC2/EC2_api.swift
+++ b/Sources/Soto/Services/EC2/EC2_api.swift
@@ -2760,6 +2760,7 @@ public struct EC2: AWSService {
    /// - clientToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
    /// - connectionLogOptions: Information about the client connection logging options. If you enable client connection logging, data about client connections is sent to a
    /// - description: A brief description of the Client VPN endpoint.
+    /// - disconnectOnSessionTimeout: Indicates whether the client VPN session is disconnected after the maximum timeout specified in SessionTimeoutHours is reached. If true, users are prompted to reconnect client VPN. If false, client VPN attempts to reconnect automatically. The default value is false.
    /// - dnsServers: Information about the DNS servers to be used for DNS resolution. A Client VPN endpoint can
    /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
    /// - securityGroupIds: The IDs of one or more security groups to apply to the target network. You must also specify the ID of the VPC that contains the security groups.
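For context, not part of the patch: a sketch of creating a Client VPN endpoint with the new `disconnectOnSessionTimeout` flag through the convenience method whose parameters are documented above. The ARNs and CIDR block are placeholders, and the nested option shapes are assumed to follow Soto's usual generated initializers.

```swift
import SotoEC2

// Hypothetical usage sketch (not part of the patch). The certificate ARNs and
// CIDR block below are placeholders; error handling is omitted.
func createTimeoutDisconnectingEndpoint(client: AWSClient) async throws {
    let ec2 = EC2(client: client, region: .useast1)
    let response = try await ec2.createClientVpnEndpoint(
        authenticationOptions: [
            .init(
                mutualAuthentication: .init(clientRootCertificateChainArn: "arn:aws:acm:us-east-1:111122223333:certificate/EXAMPLE"),
                type: .certificateAuthentication
            )
        ],
        clientCidrBlock: "10.0.0.0/16",
        connectionLogOptions: .init(enabled: false),
        disconnectOnSessionTimeout: true, // new in this patch; defaults to false
        serverCertificateArn: "arn:aws:acm:us-east-1:111122223333:certificate/EXAMPLE",
        sessionTimeoutHours: 8
    )
    print(response.clientVpnEndpointId ?? "no endpoint id returned")
}
```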
@@ -2781,6 +2782,7 @@ public struct EC2: AWSService {
        clientToken: String? = CreateClientVpnEndpointRequest.idempotencyToken(),
        connectionLogOptions: ConnectionLogOptions? = nil,
        description: String? = nil,
+        disconnectOnSessionTimeout: Bool? = nil,
        dnsServers: [String]? = nil,
        dryRun: Bool? = nil,
        securityGroupIds: [String]? = nil,
@@ -2802,6 +2804,7 @@ public struct EC2: AWSService {
            clientToken: clientToken,
            connectionLogOptions: connectionLogOptions,
            description: description,
+            disconnectOnSessionTimeout: disconnectOnSessionTimeout,
            dnsServers: dnsServers,
            dryRun: dryRun,
            securityGroupIds: securityGroupIds,
@@ -20760,6 +20763,7 @@ public struct EC2: AWSService {
    /// - clientVpnEndpointId: The ID of the Client VPN endpoint to modify.
    /// - connectionLogOptions: Information about the client connection logging options. If you enable client connection logging, data about client connections is sent to a
    /// - description: A brief description of the Client VPN endpoint.
+    /// - disconnectOnSessionTimeout: Indicates whether the client VPN session is disconnected after the maximum timeout specified in sessionTimeoutHours is reached. If true, users are prompted to reconnect client VPN. If false, client VPN attempts to reconnect automatically. The default value is false.
    /// - dnsServers: Information about the DNS servers to be used by Client VPN connections. A Client VPN endpoint can have
    /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
    /// - securityGroupIds: The IDs of one or more security groups to apply to the target network.
@@ -20777,6 +20781,7 @@ public struct EC2: AWSService {
        clientVpnEndpointId: String? = nil,
        connectionLogOptions: ConnectionLogOptions? = nil,
        description: String? = nil,
+        disconnectOnSessionTimeout: Bool? = nil,
        dnsServers: DnsServersOptionsModifyStructure? = nil,
        dryRun: Bool? = nil,
        securityGroupIds: [String]? = nil,
@@ -20794,6 +20799,7 @@ public struct EC2: AWSService {
            clientVpnEndpointId: clientVpnEndpointId,
            connectionLogOptions: connectionLogOptions,
            description: description,
+            disconnectOnSessionTimeout: disconnectOnSessionTimeout,
            dnsServers: dnsServers,
            dryRun: dryRun,
            securityGroupIds: securityGroupIds,
diff --git a/Sources/Soto/Services/EC2/EC2_shapes.swift b/Sources/Soto/Services/EC2/EC2_shapes.swift
index 851180f4c3..bb644d715d 100644
--- a/Sources/Soto/Services/EC2/EC2_shapes.swift
+++ b/Sources/Soto/Services/EC2/EC2_shapes.swift
@@ -116,6 +116,7 @@ extension EC2 {
    }

    public enum AllocationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+        case future = "future"
        case used = "used"
        public var description: String { return self.rawValue }
    }
@@ -1373,6 +1374,8 @@ extension EC2 {
        public static var f116Xlarge: Self { .init(rawValue: "f1.16xlarge") }
        public static var f12Xlarge: Self { .init(rawValue: "f1.2xlarge") }
        public static var f14Xlarge: Self { .init(rawValue: "f1.4xlarge") }
+        public static var f212Xlarge: Self { .init(rawValue: "f2.12xlarge") }
+        public static var f248Xlarge: Self { .init(rawValue: "f2.48xlarge") }
        public static var g22Xlarge: Self { .init(rawValue: "g2.2xlarge") }
        public static var g28Xlarge: Self { .init(rawValue: "g2.8xlarge") }
        public static var g316Xlarge: Self { .init(rawValue: "g3.16xlarge") }
@@ -1729,6 +1732,8 @@ extension EC2 {
        public static var p4d24Xlarge: Self { .init(rawValue: "p4d.24xlarge") }
        public static var p4de24Xlarge: Self { .init(rawValue: "p4de.24xlarge") }
        public static var p548Xlarge: Self { .init(rawValue: "p5.48xlarge") }
+        public static var p5e48Xlarge: Self { .init(rawValue: "p5e.48xlarge") }
+        public static var p5en48Xlarge: Self { .init(rawValue: "p5en.48xlarge") }
        public static var r32Xlarge: Self { .init(rawValue: "r3.2xlarge") }
        public static var r34Xlarge: Self { .init(rawValue: "r3.4xlarge") }
        public static var r38Xlarge: Self { .init(rawValue: "r3.8xlarge") }
@@ -1965,6 +1970,7 @@ extension EC2 {
        public static var trn12Xlarge: Self { .init(rawValue: "trn1.2xlarge") }
        public static var trn132Xlarge: Self { .init(rawValue: "trn1.32xlarge") }
        public static var trn1n32Xlarge: Self { .init(rawValue: "trn1n.32xlarge") }
+        public static var trn248Xlarge: Self { .init(rawValue: "trn2.48xlarge") }
        public static var u12Tb1112Xlarge: Self { .init(rawValue: "u-12tb1.112xlarge") }
        public static var u12Tb1Metal: Self { .init(rawValue: "u-12tb1.metal") }
        public static var u18Tb1112Xlarge: Self { .init(rawValue: "u-18tb1.112xlarge") }
@@ -1976,10 +1982,13 @@ extension EC2 {
        public static var u6Tb156Xlarge: Self { .init(rawValue: "u-6tb1.56xlarge") }
        public static var u6Tb1Metal: Self { .init(rawValue: "u-6tb1.metal") }
        public static var u7i12Tb224Xlarge: Self { .init(rawValue: "u7i-12tb.224xlarge") }
+        public static var u7i6Tb112Xlarge: Self { .init(rawValue: "u7i-6tb.112xlarge") }
+        public static var u7i8Tb112Xlarge: Self { .init(rawValue: "u7i-8tb.112xlarge") }
        public static var u7ib12Tb224Xlarge: Self { .init(rawValue: "u7ib-12tb.224xlarge") }
        public static var u7in16Tb224Xlarge: Self { .init(rawValue: "u7in-16tb.224xlarge") }
        public static var u7in24Tb224Xlarge: Self { .init(rawValue: "u7in-24tb.224xlarge") }
        public static var u7in32Tb224Xlarge: Self { .init(rawValue: "u7in-32tb.224xlarge") }
+        public static var u7inh32Tb480Xlarge: Self { .init(rawValue: "u7inh-32tb.480xlarge") }
        public static var u9Tb1112Xlarge: Self { .init(rawValue: "u-9tb1.112xlarge") }
        public static var u9Tb1Metal: Self { .init(rawValue: "u-9tb1.metal") }
        public static var vt124Xlarge: Self { .init(rawValue: "vt1.24xlarge") }
@@ -8746,6 +8755,8 @@ extension EC2 {
        public let deletionTime: String?
        /// A brief description of the endpoint.
        public let description: String?
+        /// Indicates whether the client VPN session is disconnected after the maximum sessionTimeoutHours is reached. If true, users are prompted to reconnect client VPN. If false, client VPN attempts to reconnect automatically. The default value is false.
+        public let disconnectOnSessionTimeout: Bool?
        /// The DNS name to be used by clients when connecting to the Client VPN endpoint.
        public let dnsName: String?
        /// Information about the DNS servers to be used for DNS resolution.
@@ -8778,7 +8789,7 @@ extension EC2 {
        public let vpnProtocol: VpnProtocol?

        @inlinable
-        public init(authenticationOptions: [ClientVpnAuthentication]? = nil, clientCidrBlock: String? = nil, clientConnectOptions: ClientConnectResponseOptions? = nil, clientLoginBannerOptions: ClientLoginBannerResponseOptions? = nil, clientVpnEndpointId: String? = nil, connectionLogOptions: ConnectionLogResponseOptions? = nil, creationTime: String? = nil, deletionTime: String? = nil, description: String? = nil, dnsName: String? = nil, dnsServers: [String]? = nil, securityGroupIds: [String]? = nil, selfServicePortalUrl: String? = nil, serverCertificateArn: String? = nil, sessionTimeoutHours: Int? = nil, splitTunnel: Bool? = nil, status: ClientVpnEndpointStatus? = nil, tags: [Tag]? = nil, transportProtocol: TransportProtocol? = nil, vpcId: String? = nil, vpnPort: Int? = nil, vpnProtocol: VpnProtocol? = nil) {
+        public init(authenticationOptions: [ClientVpnAuthentication]? = nil, clientCidrBlock: String? = nil, clientConnectOptions: ClientConnectResponseOptions? = nil, clientLoginBannerOptions: ClientLoginBannerResponseOptions? = nil, clientVpnEndpointId: String? = nil, connectionLogOptions: ConnectionLogResponseOptions? = nil, creationTime: String? = nil, deletionTime: String? = nil, description: String? = nil, disconnectOnSessionTimeout: Bool? = nil, dnsName: String? = nil, dnsServers: [String]? = nil, securityGroupIds: [String]? = nil, selfServicePortalUrl: String? = nil, serverCertificateArn: String? = nil, sessionTimeoutHours: Int? = nil, splitTunnel: Bool? = nil, status: ClientVpnEndpointStatus? = nil, tags: [Tag]? = nil, transportProtocol: TransportProtocol? = nil, vpcId: String? = nil, vpnPort: Int? = nil, vpnProtocol: VpnProtocol? = nil) {
            self.associatedTargetNetworks = nil
            self.authenticationOptions = authenticationOptions
            self.clientCidrBlock = clientCidrBlock
@@ -8789,6 +8800,7 @@ extension EC2 {
            self.creationTime = creationTime
            self.deletionTime = deletionTime
            self.description = description
+            self.disconnectOnSessionTimeout = disconnectOnSessionTimeout
            self.dnsName = dnsName
            self.dnsServers = dnsServers
            self.securityGroupIds = securityGroupIds
@@ -8806,7 +8818,7 @@ extension EC2 {
        @available(*, deprecated, message: "Members associatedTargetNetworks have been deprecated")
        @inlinable
-        public init(associatedTargetNetworks: [AssociatedTargetNetwork]? = nil, authenticationOptions: [ClientVpnAuthentication]? = nil, clientCidrBlock: String? = nil, clientConnectOptions: ClientConnectResponseOptions? = nil, clientLoginBannerOptions: ClientLoginBannerResponseOptions? = nil, clientVpnEndpointId: String? = nil, connectionLogOptions: ConnectionLogResponseOptions? = nil, creationTime: String? = nil, deletionTime: String? = nil, description: String? = nil, dnsName: String? = nil, dnsServers: [String]? = nil, securityGroupIds: [String]? = nil, selfServicePortalUrl: String? = nil, serverCertificateArn: String? = nil, sessionTimeoutHours: Int? = nil, splitTunnel: Bool? = nil, status: ClientVpnEndpointStatus? = nil, tags: [Tag]? = nil, transportProtocol: TransportProtocol? = nil, vpcId: String? = nil, vpnPort: Int? = nil, vpnProtocol: VpnProtocol? = nil) {
+        public init(associatedTargetNetworks: [AssociatedTargetNetwork]? = nil, authenticationOptions: [ClientVpnAuthentication]? = nil, clientCidrBlock: String? = nil, clientConnectOptions: ClientConnectResponseOptions? = nil, clientLoginBannerOptions: ClientLoginBannerResponseOptions? = nil, clientVpnEndpointId: String? = nil, connectionLogOptions: ConnectionLogResponseOptions? = nil, creationTime: String? = nil, deletionTime: String? = nil, description: String? = nil, disconnectOnSessionTimeout: Bool? = nil, dnsName: String? = nil, dnsServers: [String]? = nil, securityGroupIds: [String]? = nil, selfServicePortalUrl: String? = nil, serverCertificateArn: String? = nil, sessionTimeoutHours: Int? = nil, splitTunnel: Bool? = nil, status: ClientVpnEndpointStatus? = nil, tags: [Tag]? = nil, transportProtocol: TransportProtocol? = nil, vpcId: String? = nil, vpnPort: Int? = nil, vpnProtocol: VpnProtocol? = nil) {
            self.associatedTargetNetworks = associatedTargetNetworks
            self.authenticationOptions = authenticationOptions
            self.clientCidrBlock = clientCidrBlock
@@ -8817,6 +8829,7 @@ extension EC2 {
            self.creationTime = creationTime
            self.deletionTime = deletionTime
            self.description = description
+            self.disconnectOnSessionTimeout = disconnectOnSessionTimeout
            self.dnsName = dnsName
            self.dnsServers = dnsServers
            self.securityGroupIds = securityGroupIds
@@ -8843,6 +8856,7 @@ extension EC2 {
            case creationTime = "creationTime"
            case deletionTime = "deletionTime"
            case description = "description"
+            case disconnectOnSessionTimeout = "disconnectOnSessionTimeout"
            case dnsName = "dnsName"
            case dnsServers = "dnsServer"
            case securityGroupIds = "securityGroupIdSet"
@@ -10077,6 +10091,8 @@ extension EC2 {
        public let connectionLogOptions: ConnectionLogOptions?
        /// A brief description of the Client VPN endpoint.
        public let description: String?
+        /// Indicates whether the client VPN session is disconnected after the maximum timeout specified in SessionTimeoutHours is reached. If true, users are prompted to reconnect client VPN. If false, client VPN attempts to reconnect automatically. The default value is false.
+        public let disconnectOnSessionTimeout: Bool?
        /// Information about the DNS servers to be used for DNS resolution. A Client VPN endpoint can
        /// have up to two DNS servers. If no DNS server is specified, the DNS address configured on the device is used for the DNS server.
        @OptionalCustomCoding>
@@ -10107,7 +10123,7 @@ extension EC2 {
        public let vpnPort: Int?

        @inlinable
-        public init(authenticationOptions: [ClientVpnAuthenticationRequest]? = nil, clientCidrBlock: String? = nil, clientConnectOptions: ClientConnectOptions? = nil, clientLoginBannerOptions: ClientLoginBannerOptions? = nil, clientToken: String? = CreateClientVpnEndpointRequest.idempotencyToken(), connectionLogOptions: ConnectionLogOptions? = nil, description: String? = nil, dnsServers: [String]? = nil, dryRun: Bool? = nil, securityGroupIds: [String]? = nil, selfServicePortal: SelfServicePortal? = nil, serverCertificateArn: String? = nil, sessionTimeoutHours: Int? = nil, splitTunnel: Bool? = nil, tagSpecifications: [TagSpecification]? = nil, transportProtocol: TransportProtocol? = nil, vpcId: String? = nil, vpnPort: Int? = nil) {
+        public init(authenticationOptions: [ClientVpnAuthenticationRequest]? = nil, clientCidrBlock: String? = nil, clientConnectOptions: ClientConnectOptions? = nil, clientLoginBannerOptions: ClientLoginBannerOptions? = nil, clientToken: String? = CreateClientVpnEndpointRequest.idempotencyToken(), connectionLogOptions: ConnectionLogOptions? = nil, description: String? = nil, disconnectOnSessionTimeout: Bool? = nil, dnsServers: [String]? = nil, dryRun: Bool? = nil, securityGroupIds: [String]? = nil, selfServicePortal: SelfServicePortal? = nil, serverCertificateArn: String? = nil, sessionTimeoutHours: Int? = nil, splitTunnel: Bool? = nil, tagSpecifications: [TagSpecification]? = nil, transportProtocol: TransportProtocol? = nil, vpcId: String? = nil, vpnPort: Int? = nil) {
            self.authenticationOptions = authenticationOptions
            self.clientCidrBlock = clientCidrBlock
            self.clientConnectOptions = clientConnectOptions
@@ -10115,6 +10131,7 @@ extension EC2 {
            self.clientToken = clientToken
            self.connectionLogOptions = connectionLogOptions
            self.description = description
+            self.disconnectOnSessionTimeout = disconnectOnSessionTimeout
            self.dnsServers = dnsServers
            self.dryRun = dryRun
            self.securityGroupIds = securityGroupIds
@@ -10136,6 +10153,7 @@ extension EC2 {
            case clientToken = "ClientToken"
            case connectionLogOptions = "ConnectionLogOptions"
            case description = "Description"
+            case disconnectOnSessionTimeout = "DisconnectOnSessionTimeout"
            case dnsServers = "DnsServers"
            case dryRun = "DryRun"
            case securityGroupIds = "SecurityGroupId"
@@ -43070,6 +43088,8 @@ extension EC2 {
        public let connectionLogOptions: ConnectionLogOptions?
        /// A brief description of the Client VPN endpoint.
        public let description: String?
+        /// Indicates whether the client VPN session is disconnected after the maximum timeout specified in sessionTimeoutHours is reached. If true, users are prompted to reconnect client VPN. If false, client VPN attempts to reconnect automatically. The default value is false.
+        public let disconnectOnSessionTimeout: Bool?
        /// Information about the DNS servers to be used by Client VPN connections. A Client VPN endpoint can have
        /// up to two DNS servers.
        public let dnsServers: DnsServersOptionsModifyStructure?
@@ -43093,12 +43113,13 @@ extension EC2 {
        public let vpnPort: Int?

        @inlinable
-        public init(clientConnectOptions: ClientConnectOptions? = nil, clientLoginBannerOptions: ClientLoginBannerOptions? = nil, clientVpnEndpointId: String? = nil, connectionLogOptions: ConnectionLogOptions? = nil, description: String? = nil, dnsServers: DnsServersOptionsModifyStructure? = nil, dryRun: Bool? = nil, securityGroupIds: [String]? = nil, selfServicePortal: SelfServicePortal? = nil, serverCertificateArn: String? = nil, sessionTimeoutHours: Int? = nil, splitTunnel: Bool? = nil, vpcId: String? = nil, vpnPort: Int? = nil) {
+        public init(clientConnectOptions: ClientConnectOptions? = nil, clientLoginBannerOptions: ClientLoginBannerOptions? = nil, clientVpnEndpointId: String? = nil, connectionLogOptions: ConnectionLogOptions? = nil, description: String? = nil, disconnectOnSessionTimeout: Bool? = nil, dnsServers: DnsServersOptionsModifyStructure? = nil, dryRun: Bool? = nil, securityGroupIds: [String]? = nil, selfServicePortal: SelfServicePortal? = nil, serverCertificateArn: String? = nil, sessionTimeoutHours: Int? = nil, splitTunnel: Bool? = nil, vpcId: String? = nil, vpnPort: Int? = nil) {
            self.clientConnectOptions = clientConnectOptions
            self.clientLoginBannerOptions = clientLoginBannerOptions
            self.clientVpnEndpointId = clientVpnEndpointId
            self.connectionLogOptions = connectionLogOptions
            self.description = description
+            self.disconnectOnSessionTimeout = disconnectOnSessionTimeout
            self.dnsServers = dnsServers
            self.dryRun = dryRun
            self.securityGroupIds = securityGroupIds
@@ -43116,6 +43137,7 @@ extension EC2 {
            case clientVpnEndpointId = "ClientVpnEndpointId"
            case connectionLogOptions = "ConnectionLogOptions"
            case description = "Description"
+            case disconnectOnSessionTimeout = "DisconnectOnSessionTimeout"
            case dnsServers = "DnsServers"
            case dryRun = "DryRun"
            case securityGroupIds = "SecurityGroupId"
diff --git a/Sources/Soto/Services/ECR/ECR_api.swift b/Sources/Soto/Services/ECR/ECR_api.swift
index f03150ceed..8bbcefa2d3 100644
--- a/Sources/Soto/Services/ECR/ECR_api.swift
+++ b/Sources/Soto/Services/ECR/ECR_api.swift
@@ -93,6 +93,7 @@ public struct ECR: AWSService {
        "ap-southeast-3": "api.ecr.ap-southeast-3.amazonaws.com",
        "ap-southeast-4": "api.ecr.ap-southeast-4.amazonaws.com",
        "ap-southeast-5": "api.ecr.ap-southeast-5.amazonaws.com",
+        "ap-southeast-7": "api.ecr.ap-southeast-7.amazonaws.com",
        "ca-central-1": "api.ecr.ca-central-1.amazonaws.com",
        "ca-west-1": "api.ecr.ca-west-1.amazonaws.com",
        "cn-north-1": "api.ecr.cn-north-1.amazonaws.com.cn",
@@ -108,6 +109,7 @@ public struct ECR: AWSService {
        "il-central-1": "api.ecr.il-central-1.amazonaws.com",
        "me-central-1": "api.ecr.me-central-1.amazonaws.com",
        "me-south-1": "api.ecr.me-south-1.amazonaws.com",
+        "mx-central-1": "api.ecr.mx-central-1.amazonaws.com",
        "sa-east-1": "api.ecr.sa-east-1.amazonaws.com",
        "us-east-1": "api.ecr.us-east-1.amazonaws.com",
        "us-east-2": "api.ecr.us-east-2.amazonaws.com",
@@ -123,33 +125,53 @@ public struct ECR: AWSService {

    /// FIPS and dualstack endpoints
    static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[
+        [.dualstack]: .init(endpoints: [
+            "af-south-1": "ecr.af-south-1.api.aws",
+            "ap-east-1": "ecr.ap-east-1.api.aws",
+            "ap-northeast-1": "ecr.ap-northeast-1.api.aws",
+            "ap-northeast-2": "ecr.ap-northeast-2.api.aws",
+            "ap-northeast-3": "ecr.ap-northeast-3.api.aws",
+            "ap-south-1": "ecr.ap-south-1.api.aws",
+            "ap-south-2": "ecr.ap-south-2.api.aws",
+            "ap-southeast-1": "ecr.ap-southeast-1.api.aws",
+            "ap-southeast-2": "ecr.ap-southeast-2.api.aws",
+            "ap-southeast-3": "ecr.ap-southeast-3.api.aws",
+            "ap-southeast-4": "ecr.ap-southeast-4.api.aws",
+            "ap-southeast-5": "ecr.ap-southeast-5.api.aws",
+            "ap-southeast-7": "ecr.ap-southeast-7.api.aws",
+            "ca-central-1": "ecr.ca-central-1.api.aws",
+            "ca-west-1": "ecr.ca-west-1.api.aws",
+            "cn-north-1": "ecr.cn-north-1.api.amazonwebservices.com.cn",
+            "cn-northwest-1": "ecr.cn-northwest-1.api.amazonwebservices.com.cn",
+            "eu-central-1": "ecr.eu-central-1.api.aws",
+            "eu-central-2": "ecr.eu-central-2.api.aws",
+            "eu-north-1": "ecr.eu-north-1.api.aws",
+            "eu-south-1": "ecr.eu-south-1.api.aws",
+            "eu-south-2": "ecr.eu-south-2.api.aws",
+            "eu-west-1": "ecr.eu-west-1.api.aws",
+            "eu-west-2": "ecr.eu-west-2.api.aws",
+            "eu-west-3": "ecr.eu-west-3.api.aws",
+            "il-central-1": "ecr.il-central-1.api.aws",
+            "me-central-1": "ecr.me-central-1.api.aws",
+            "me-south-1": "ecr.me-south-1.api.aws",
+            "mx-central-1": "ecr.mx-central-1.api.aws",
+            "sa-east-1": "ecr.sa-east-1.api.aws",
+            "us-east-1": "ecr.us-east-1.api.aws",
+            "us-east-2": "ecr.us-east-2.api.aws",
+            "us-gov-east-1": "ecr.us-gov-east-1.api.aws",
+            "us-gov-west-1": "ecr.us-gov-west-1.api.aws",
+            "us-west-1": "ecr.us-west-1.api.aws",
+            "us-west-2": "ecr.us-west-2.api.aws"
+        ]),
+        [.dualstack, .fips]: .init(endpoints: [
+            "us-east-1": "ecr-fips.us-east-1.api.aws",
+            "us-east-2": "ecr-fips.us-east-2.api.aws",
+            "us-gov-east-1": "ecr-fips.us-gov-east-1.api.aws",
+            "us-gov-west-1": "ecr-fips.us-gov-west-1.api.aws",
+            "us-west-1": "ecr-fips.us-west-1.api.aws",
+            "us-west-2": "ecr-fips.us-west-2.api.aws"
+        ]),
        [.fips]: .init(endpoints: [
-            "af-south-1": "ecr-fips.af-south-1.amazonaws.com",
-            "ap-east-1": "ecr-fips.ap-east-1.amazonaws.com",
-            "ap-northeast-1": "ecr-fips.ap-northeast-1.amazonaws.com",
-            "ap-northeast-2": "ecr-fips.ap-northeast-2.amazonaws.com",
-            "ap-northeast-3": "ecr-fips.ap-northeast-3.amazonaws.com",
-            "ap-south-1": "ecr-fips.ap-south-1.amazonaws.com",
-            "ap-south-2": "ecr-fips.ap-south-2.amazonaws.com",
-            "ap-southeast-1": "ecr-fips.ap-southeast-1.amazonaws.com",
-            "ap-southeast-2": "ecr-fips.ap-southeast-2.amazonaws.com",
-            "ap-southeast-3": "ecr-fips.ap-southeast-3.amazonaws.com",
-            "ap-southeast-4": "ecr-fips.ap-southeast-4.amazonaws.com",
-            "ap-southeast-5": "ecr-fips.ap-southeast-5.amazonaws.com",
-            "ca-central-1": "ecr-fips.ca-central-1.amazonaws.com",
-            "ca-west-1": "ecr-fips.ca-west-1.amazonaws.com",
-            "eu-central-1": "ecr-fips.eu-central-1.amazonaws.com",
-            "eu-central-2": "ecr-fips.eu-central-2.amazonaws.com",
-            "eu-north-1": "ecr-fips.eu-north-1.amazonaws.com",
-            "eu-south-1": "ecr-fips.eu-south-1.amazonaws.com",
-            "eu-south-2": "ecr-fips.eu-south-2.amazonaws.com",
-            "eu-west-1": "ecr-fips.eu-west-1.amazonaws.com",
-            "eu-west-2": "ecr-fips.eu-west-2.amazonaws.com",
-            "eu-west-3": "ecr-fips.eu-west-3.amazonaws.com",
-            "il-central-1": "ecr-fips.il-central-1.amazonaws.com",
-            "me-central-1": "ecr-fips.me-central-1.amazonaws.com",
-            "me-south-1": "ecr-fips.me-south-1.amazonaws.com",
-            "sa-east-1": "ecr-fips.sa-east-1.amazonaws.com",
            "us-east-1": "ecr-fips.us-east-1.amazonaws.com",
            "us-east-2": "ecr-fips.us-east-2.amazonaws.com",
            "us-gov-east-1": "ecr-fips.us-gov-east-1.amazonaws.com",
@@ -917,7 +939,7 @@ public struct ECR: AWSService {
        return try await self.describeRepositoryCreationTemplates(input, logger: logger)
    }

-    /// Retrieves the basic scan type version name.
+    /// Retrieves the account setting value for the specified setting name.
    @Sendable
    @inlinable
    public func getAccountSetting(_ input: GetAccountSettingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAccountSettingResponse {
@@ -930,10 +952,10 @@ public struct ECR: AWSService {
            logger: logger
        )
    }
-    /// Retrieves the basic scan type version name.
+    /// Retrieves the account setting value for the specified setting name.
    ///
    /// Parameters:
-    /// - name: Basic scan type version name.
+    /// - name: The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or REGISTRY_POLICY_SCOPE.
    /// - logger: Logger use during operation
    @inlinable
    public func getAccountSetting(
@@ -1269,7 +1291,7 @@ public struct ECR: AWSService {
        return try await self.listTagsForResource(input, logger: logger)
    }

-    /// Allows you to change the basic scan type version by setting the name parameter to either CLAIR to AWS_NATIVE.
+    /// Allows you to change the basic scan type version or registry policy scope.
    @Sendable
    @inlinable
    public func putAccountSetting(_ input: PutAccountSettingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutAccountSettingResponse {
@@ -1282,11 +1304,11 @@ public struct ECR: AWSService {
            logger: logger
        )
    }
-    /// Allows you to change the basic scan type version by setting the name parameter to either CLAIR to AWS_NATIVE.
+    /// Allows you to change the basic scan type version or registry policy scope.
    ///
    /// Parameters:
-    /// - name: Basic scan type version name.
-    /// - value: Setting value that determines what basic scan type is being used: AWS_NATIVE or CLAIR.
+    /// - name: The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or REGISTRY_POLICY_SCOPE.
+    /// - value: Setting value that is specified. The following are valid values for the basic scan type being used: AWS_NATIVE or CLAIR. The following are valid values for the registry policy scope being used: V1 or V2.
    /// - logger: Logger use during operation
    @inlinable
    public func putAccountSetting(
diff --git a/Sources/Soto/Services/ECR/ECR_shapes.swift b/Sources/Soto/Services/ECR/ECR_shapes.swift
index 642b767edc..0b369e7e58 100644
--- a/Sources/Soto/Services/ECR/ECR_shapes.swift
+++ b/Sources/Soto/Services/ECR/ECR_shapes.swift
@@ -1564,7 +1564,7 @@ extension ECR {
    }

    public struct GetAccountSettingRequest: AWSEncodableShape {
-        /// Basic scan type version name.
+        /// The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or REGISTRY_POLICY_SCOPE.
        public let name: String

        @inlinable
@@ -1583,9 +1583,9 @@ extension ECR {
    }

    public struct GetAccountSettingResponse: AWSDecodableShape {
-        /// Retrieves the basic scan type version name.
+        /// Retrieves the name of the account setting.
        public let name: String?
-        /// Retrieves the value that specifies what basic scan type is being used: AWS_NATIVE or CLAIR.
+        /// The setting value for the setting name. The following are valid values for the basic scan type being used: AWS_NATIVE or CLAIR. The following are valid values for the registry policy scope being used: V1 or V2.
        public let value: String?

        @inlinable
@@ -2538,9 +2538,9 @@ extension ECR {
    }

    public struct PutAccountSettingRequest: AWSEncodableShape {
-        /// Basic scan type version name.
+        /// The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or REGISTRY_POLICY_SCOPE.
        public let name: String
-        /// Setting value that determines what basic scan type is being used: AWS_NATIVE or CLAIR.
+        /// Setting value that is specified. The following are valid values for the basic scan type being used: AWS_NATIVE or CLAIR. The following are valid values for the registry policy scope being used: V1 or V2.
        public let value: String

        @inlinable
@@ -2561,9 +2561,9 @@ extension ECR {
    }

    public struct PutAccountSettingResponse: AWSDecodableShape {
-        /// Retrieves the the basic scan type version name.
+        /// Retrieves the name of the account setting.
        public let name: String?
-        /// Retrieves the basic scan type value, either AWS_NATIVE or -.
+        /// Retrieves the value of the specified account setting.
        public let value: String?
        @inlinable
diff --git a/Sources/Soto/Services/ECRPublic/ECRPublic_api.swift b/Sources/Soto/Services/ECRPublic/ECRPublic_api.swift
index d07f3ae6d0..c65e9e0f92 100644
--- a/Sources/Soto/Services/ECRPublic/ECRPublic_api.swift
+++ b/Sources/Soto/Services/ECRPublic/ECRPublic_api.swift
@@ -68,6 +68,7 @@ public struct ECRPublic: AWSService {
            apiVersion: "2020-10-30",
            endpoint: endpoint,
            serviceEndpoints: Self.serviceEndpoints,
+            variantEndpoints: Self.variantEndpoints,
            errorType: ECRPublicErrorType.self,
            xmlNamespace: "http://ecr-public.amazonaws.com/doc/2020-12-02/",
            middleware: middleware,
@@ -85,6 +86,12 @@ public struct ECRPublic: AWSService {
    ]}

+    /// FIPS and dualstack endpoints
+    static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[
+        [.dualstack]: .init(endpoints: [
+            "us-east-1": "ecr-public.us-east-1.api.aws"
+        ])
+    ]}

    // MARK: API Calls
diff --git a/Sources/Soto/Services/ECS/ECS_api.swift b/Sources/Soto/Services/ECS/ECS_api.swift
index 9adcb4d539..4249ebd677 100644
--- a/Sources/Soto/Services/ECS/ECS_api.swift
+++ b/Sources/Soto/Services/ECS/ECS_api.swift
@@ -1541,7 +1541,7 @@ public struct ECS: AWSService {
    }

    /// This operation lists all the service deployments that meet the specified filter
-    /// criteria. A service deployment happens when you release a softwre update for the service. You
+    /// criteria. A service deployment happens when you release a software update for the service. You
    /// route traffic from the running service revisions to the new service revison and control
    /// the number of running tasks. This API returns the values that you use for the request parameters in DescribeServiceRevisions.
    @Sendable
@@ -1557,7 +1557,7 @@ public struct ECS: AWSService {
        )
    }
    /// This operation lists all the service deployments that meet the specified filter
-    /// criteria. A service deployment happens when you release a softwre update for the service. You
+    /// criteria. A service deployment happens when you release a software update for the service. You
    /// route traffic from the running service revisions to the new service revison and control
    /// the number of running tasks. This API returns the values that you use for the request parameters in DescribeServiceRevisions.
    ///
diff --git a/Sources/Soto/Services/ECS/ECS_shapes.swift b/Sources/Soto/Services/ECS/ECS_shapes.swift
index f730fddc17..042fc102d3 100644
--- a/Sources/Soto/Services/ECS/ECS_shapes.swift
+++ b/Sources/Soto/Services/ECS/ECS_shapes.swift
@@ -653,11 +653,10 @@ extension ECS {
        public let assignPublicIp: AssignPublicIp?
        /// The IDs of the security groups associated with the task or service. If you don't
        /// specify a security group, the default security group for the VPC is used. There's a
-        /// limit of 5 security groups that can be specified per
-        /// awsvpcConfiguration. All specified security groups must be from the same VPC.
+        /// limit of 5 security groups that can be specified. All specified security groups must be from the same VPC.
        public let securityGroups: [String]?
        /// The IDs of the subnets associated with the task or service. There's a limit of 16
-        /// subnets that can be specified per awsvpcConfiguration. All specified subnets must be from the same VPC.
+        /// subnets that can be specified. All specified subnets must be from the same VPC.
        public let subnets: [String]

        @inlinable
@@ -1215,7 +1214,7 @@ extension ECS {
        /// isolation is achieved on the container instance using security groups and VPC
        /// settings.
        public let links: [String]?
- /// Linux-specific modifications that are applied to the container, such as Linux kernel + /// Linux-specific modifications that are applied to the default Docker container configuration, such as Linux kernel /// capabilities. For more information see KernelCapabilities. This parameter is not supported for Windows containers. public let linuxParameters: LinuxParameters? /// The log configuration specification for the container. This parameter maps to LogConfig in the docker container create command diff --git a/Sources/Soto/Services/EFS/EFS_api.swift b/Sources/Soto/Services/EFS/EFS_api.swift index 7f6d1b8057..1df8c5a90a 100644 --- a/Sources/Soto/Services/EFS/EFS_api.swift +++ b/Sources/Soto/Services/EFS/EFS_api.swift @@ -92,6 +92,7 @@ public struct EFS: AWSService { "ap-southeast-3": "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", "ap-southeast-4": "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", "ap-southeast-5": "elasticfilesystem-fips.ap-southeast-5.amazonaws.com", + "ap-southeast-7": "elasticfilesystem-fips.ap-southeast-7.amazonaws.com", "ca-central-1": "elasticfilesystem-fips.ca-central-1.amazonaws.com", "ca-west-1": "elasticfilesystem-fips.ca-west-1.amazonaws.com", "cn-north-1": "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", @@ -107,6 +108,7 @@ public struct EFS: AWSService { "il-central-1": "elasticfilesystem-fips.il-central-1.amazonaws.com", "me-central-1": "elasticfilesystem-fips.me-central-1.amazonaws.com", "me-south-1": "elasticfilesystem-fips.me-south-1.amazonaws.com", + "mx-central-1": "elasticfilesystem-fips.mx-central-1.amazonaws.com", "sa-east-1": "elasticfilesystem-fips.sa-east-1.amazonaws.com", "us-east-1": "elasticfilesystem-fips.us-east-1.amazonaws.com", "us-east-2": "elasticfilesystem-fips.us-east-2.amazonaws.com", diff --git a/Sources/Soto/Services/EKS/EKS_api.swift b/Sources/Soto/Services/EKS/EKS_api.swift index d1a97599c4..b4ac1d9a50 100644 --- a/Sources/Soto/Services/EKS/EKS_api.swift +++ b/Sources/Soto/Services/EKS/EKS_api.swift @@ -92,6 +92,7 @@ public struct EKS: AWSService { "ap-southeast-3": "fips.eks.ap-southeast-3.amazonaws.com", "ap-southeast-4": "fips.eks.ap-southeast-4.amazonaws.com", "ap-southeast-5": "fips.eks.ap-southeast-5.amazonaws.com", + "ap-southeast-7": "fips.eks.ap-southeast-7.amazonaws.com", "ca-central-1": "fips.eks.ca-central-1.amazonaws.com", "ca-west-1": "fips.eks.ca-west-1.amazonaws.com", "eu-central-1": "fips.eks.eu-central-1.amazonaws.com", @@ -105,6 +106,7 @@ public struct EKS: AWSService { "il-central-1": "fips.eks.il-central-1.amazonaws.com", "me-central-1": "fips.eks.me-central-1.amazonaws.com", "me-south-1": "fips.eks.me-south-1.amazonaws.com", + "mx-central-1": "fips.eks.mx-central-1.amazonaws.com", "sa-east-1": "fips.eks.sa-east-1.amazonaws.com", "us-east-1": "fips.eks.us-east-1.amazonaws.com", "us-east-2": "fips.eks.us-east-2.amazonaws.com", @@ -247,9 +249,9 @@ public struct EKS: AWSService { /// - clientRequestToken: A unique, case-sensitive identifier that you provide to ensure /// - clusterName: The name of your cluster. /// - kubernetesGroups: The value for name that you've specified for kind: Group as a subject in a Kubernetes RoleBinding or ClusterRoleBinding object. Amazon EKS doesn't confirm that the value for name exists in any bindings on your cluster. You can specify one or more names. Kubernetes authorizes the principalArn of the access entry to access any cluster objects that you've specified in a Kubernetes Role or ClusterRole object that is also specified in a binding's roleRef. 
For more information about creating Kubernetes RoleBinding, ClusterRoleBinding, Role, or ClusterRole objects, see Using RBAC Authorization in the Kubernetes documentation. If you want Amazon EKS to authorize the principalArn (instead of, or in addition to Kubernetes authorizing the principalArn), you can associate one or more access policies to the access entry using AssociateAccessPolicy. If you associate any access policies, the principalARN has all permissions assigned in the associated access policies and all permissions in any Kubernetes Role or ClusterRole objects that the group names are bound to. - /// - principalArn: The ARN of the IAM principal for the AccessEntry. You can specify one ARN for each access entry. You can't specify the same ARN in more than one access entry. This value can't be changed after access entry creation. The valid principals differ depending on the type of the access entry in the type field. The only valid ARN is IAM roles for the types of access entries for nodes: . You can use every IAM principal type for STANDARD access entries. You can't use the STS session principal type with access entries because this is a temporary principal for each session and not a permanent identity that can be assigned permissions. IAM best practices recommend using IAM roles with temporary credentials, rather than IAM users with long-term credentials. + /// - principalArn: The ARN of the IAM principal for the AccessEntry. You can specify one ARN for each access entry. You can't specify the same ARN in more than one access entry. This value can't be changed after access entry creation. The valid principals differ depending on the type of the access entry in the type field. For STANDARD access entries, you can use every IAM principal type. For nodes (EC2 (for EKS Auto Mode), EC2_LINUX, EC2_WINDOWS, FARGATE_LINUX, and HYBRID_LINUX), the only valid ARN is IAM roles. You can't use the STS session principal type with access entries because this is a temporary principal for each session and not a permanent identity that can be assigned permissions. IAM best practices recommend using IAM roles with temporary credentials, rather than IAM users with long-term credentials. /// - tags: Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. - /// - type: The type of the new access entry. Valid values are Standard, FARGATE_LINUX, EC2_LINUX, and EC2_WINDOWS. If the principalArn is for an IAM role that's used for self-managed Amazon EC2 nodes, specify EC2_LINUX or EC2_WINDOWS. Amazon EKS grants the necessary permissions to the node for you. If the principalArn is for any other purpose, specify STANDARD. If you don't specify a value, Amazon EKS sets the value to STANDARD. It's unnecessary to create access entries for IAM roles used with Fargate profiles or managed Amazon EC2 nodes, because Amazon EKS creates entries in the aws-auth ConfigMap for the roles. You can't change this value once you've created the access entry. If you set the value to EC2_LINUX or EC2_WINDOWS, you can't specify values for kubernetesGroups, or associate an AccessPolicy to the access entry. + /// - type: The type of the new access entry. Valid values are STANDARD, FARGATE_LINUX, EC2_LINUX, EC2_WINDOWS, EC2 (for EKS Auto Mode), HYBRID_LINUX, and HYPERPOD_LINUX. 
If the principalArn is for an IAM role that's used for self-managed Amazon EC2 nodes, specify EC2_LINUX or EC2_WINDOWS. Amazon EKS grants the necessary permissions to the node for you. If the principalArn is for any other purpose, specify STANDARD. If you don't specify a value, Amazon EKS sets the value to STANDARD. If you have the access mode of the cluster set to API_AND_CONFIG_MAP, it's unnecessary to create access entries for IAM roles used with Fargate profiles or managed Amazon EC2 nodes, because Amazon EKS creates entries in the aws-auth ConfigMap for the roles. You can't change this value once you've created the access entry. If you set the value to EC2_LINUX or EC2_WINDOWS, you can't specify values for kubernetesGroups, or associate an AccessPolicy to the access entry. /// - username: The username to authenticate to Kubernetes with. We recommend not specifying a username and letting Amazon EKS specify it for you. For more information about the value Amazon EKS specifies for you, or constraints before specifying your own username, see Creating access entries in the Amazon EKS User Guide. /// - logger: Logger use during operation @inlinable @@ -296,7 +298,7 @@ public struct EKS: AWSService { /// - clientRequestToken: A unique, case-sensitive identifier that you provide to ensure /// - clusterName: The name of your cluster. /// - configurationValues: The set of configuration values for the add-on that's created. The values that you provide are validated against the schema returned by DescribeAddonConfiguration. - /// - podIdentityAssociations: An array of Pod Identity Assocations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide. + /// - podIdentityAssociations: An array of Pod Identity Assocations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide. /// - resolveConflicts: How to resolve field value conflicts for an Amazon EKS add-on. Conflicts are handled based on the value you choose: None – If the self-managed version of the add-on is installed on your cluster, Amazon EKS doesn't change the value. Creation of the add-on might fail. Overwrite – If the self-managed version of the add-on is installed on your cluster and the Amazon EKS default value is different than the existing value, Amazon EKS changes the value to the Amazon EKS default value. Preserve – This is similar to the NONE option. If the self-managed version of the add-on is installed on your cluster Amazon EKS doesn't change the add-on resource properties. Creation of the add-on might fail if conflicts are detected. This option works differently during the update operation. For more information, see UpdateAddon. If you don't currently have the self-managed version of the add-on installed on your cluster, the Amazon EKS add-on is installed. Amazon EKS sets all values to default values, regardless of the option that you specify. /// - serviceAccountRoleArn: The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the permissions assigned to the node IAM role. 
For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide. To specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for your cluster. For more information, see Enabling IAM roles for service accounts on your cluster in the Amazon EKS User Guide. /// - tags: Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. @@ -350,7 +352,7 @@ public struct EKS: AWSService { /// - computeConfig: Enable or disable the compute capability of EKS Auto Mode when creating your EKS Auto Mode cluster. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account /// - encryptionConfig: The encryption configuration for the cluster. /// - kubernetesNetworkConfig: The Kubernetes network configuration for the cluster. - /// - logging: Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. + /// - logging: Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs . By default, cluster control plane logs aren't exported to CloudWatch Logs . For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. /// - name: The unique name to give to your cluster. The name can contain only alphanumeric characters (case-sensitive), /// - outpostConfig: An object representing the configuration of your local Amazon EKS cluster on an Amazon Web Services Outpost. Before creating a local cluster on an Outpost, review Local clusters for Amazon EKS on Amazon Web Services Outposts in the Amazon EKS User Guide. This object isn't available for creating Amazon EKS clusters on the Amazon Web Services cloud. /// - remoteNetworkConfig: The configuration in the cluster for EKS Hybrid Nodes. You can't change or update this configuration after the cluster is created. @@ -360,7 +362,7 @@ public struct EKS: AWSService { /// - tags: Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. /// - upgradePolicy: New clusters, by default, have extended support enabled. You can disable extended support when creating a cluster by setting this value to STANDARD. /// - version: The desired Kubernetes version for your cluster. If you don't specify a value here, the default version available in Amazon EKS is used. The default version might not be the latest version available. - /// - zonalShiftConfig: Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster. Zonal shift is a feature of Amazon Application Recovery Controller (ARC). 
ARC zonal shift is designed to be a temporary measure that allows you to move traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel it. You can extend the zonal shift if necessary. You can start a zonal shift for an EKS cluster, or you can allow Amazon Web Services to do it for you by enabling zonal autoshift. This shift updates the flow of east-to-west network traffic in your cluster to only consider network endpoints for Pods running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress traffic for applications in your EKS cluster will automatically route traffic to targets in the healthy AZs. For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC) Zonal Shift in Amazon EKS in the Amazon EKS User Guide . + /// - zonalShiftConfig: Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster. Zonal shift is a feature of Amazon Application Recovery Controller (ARC). ARC zonal shift is designed to be a temporary measure that allows you to move traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel it. You can extend the zonal shift if necessary. You can start a zonal shift for an Amazon EKS cluster, or you can allow Amazon Web Services to do it for you by enabling zonal autoshift. This shift updates the flow of east-to-west network traffic in your cluster to only consider network endpoints for Pods running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress traffic for applications in your Amazon EKS cluster will automatically route traffic to targets in the healthy AZs. For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC) Zonal Shift in Amazon EKS in the Amazon EKS User Guide. /// - logger: Logger use during operation @inlinable public func createCluster( @@ -452,7 +454,7 @@ public struct EKS: AWSService { return try await self.createEksAnywhereSubscription(input, logger: logger) } - /// Creates an Fargate profile for your Amazon EKS cluster. You must have at least one Fargate profile in a cluster to be able to run pods on Fargate. The Fargate profile allows an administrator to declare which pods run on Fargate and specify which pods run on which Fargate profile. This declaration is done through the profile’s selectors. Each profile can have up to five selectors that contain a namespace and labels. A namespace is required for every selector. The label field consists of multiple optional key-value pairs. Pods that match the selectors are scheduled on Fargate. If a to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is run on Fargate. When you create a Fargate profile, you must specify a pod execution role to use with the pods that are scheduled with the profile. This role is added to the cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the kubelet that is running on the Fargate infrastructure can register with your Amazon EKS cluster so that it can appear in your cluster as a node. The pod execution role also provides IAM permissions to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more information, see Pod Execution Role in the Amazon EKS User Guide. Fargate profiles are immutable.
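To make the createCluster parameters above concrete, here is a hedged sketch that enables control plane API and audit logging plus ARC zonal shift at creation time. The cluster name, role ARN, and subnet IDs are invented placeholders, and the VPC configuration is reduced to the minimum for illustration:

let cluster = try await eks.createCluster(
    logging: EKS.Logging(
        clusterLogging: [EKS.LogSetup(enabled: true, types: [.api, .audit])]
    ),
    name: "demo-cluster",
    resourcesVpcConfig: EKS.VpcConfigRequest(
        subnetIds: ["subnet-0abc0abc0abc0abc0", "subnet-0def0def0def0def0"]
    ),
    roleArn: "arn:aws:iam::111122223333:role/eksClusterRole",
    zonalShiftConfig: EKS.ZonalShiftConfigRequest(enabled: true)
)
// Cluster creation is asynchronous; poll status via describeCluster.
print(cluster.cluster?.status?.description ?? "unknown")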
However, you can create a new updated profile to replace an existing profile and then delete the original after the updated profile has finished creating. If any Fargate profiles in a cluster are in the DELETING status, you must wait for that Fargate profile to finish deleting before you can create any other profiles in that cluster. For more information, see Fargate profile in the Amazon EKS User Guide. + /// Creates a Fargate profile for your Amazon EKS cluster. You must have at least one Fargate profile in a cluster to be able to run pods on Fargate. The Fargate profile allows an administrator to declare which pods run on Fargate and specify which pods run on which Fargate profile. This declaration is done through the profile's selectors. Each profile can have up to five selectors that contain a namespace and labels. A namespace is required for every selector. The label field consists of multiple optional key-value pairs. Pods that match the selectors are scheduled on Fargate. If a to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is run on Fargate. When you create a Fargate profile, you must specify a pod execution role to use with the pods that are scheduled with the profile. This role is added to the cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the kubelet that is running on the Fargate infrastructure can register with your Amazon EKS cluster so that it can appear in your cluster as a node. The pod execution role also provides IAM permissions to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more information, see Pod Execution Role in the Amazon EKS User Guide. Fargate profiles are immutable. However, you can create a new updated profile to replace an existing profile and then delete the original after the updated profile has finished creating. If any Fargate profiles in a cluster are in the DELETING status, you must wait for that Fargate profile to finish deleting before you can create any other profiles in that cluster. For more information, see Fargate profile in the Amazon EKS User Guide. @Sendable @inlinable public func createFargateProfile(_ input: CreateFargateProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateFargateProfileResponse { @@ -465,7 +467,7 @@ logger: logger ) } - /// Creates an Fargate profile for your Amazon EKS cluster. You must have at least one Fargate profile in a cluster to be able to run pods on Fargate. The Fargate profile allows an administrator to declare which pods run on Fargate and specify which pods run on which Fargate profile. This declaration is done through the profile’s selectors. Each profile can have up to five selectors that contain a namespace and labels. A namespace is required for every selector. The label field consists of multiple optional key-value pairs. Pods that match the selectors are scheduled on Fargate. If a to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is run on Fargate. When you create a Fargate profile, you must specify a pod execution role to use with the pods that are scheduled with the profile. This role is added to the cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the kubelet that is running on the Fargate infrastructure can register with your Amazon EKS cluster so that it can appear in your cluster as a node.
The pod execution role also provides IAM permissions to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more information, see Pod Execution Role in the Amazon EKS User Guide. Fargate profiles are immutable. However, you can create a new updated profile to replace an existing profile and then delete the original after the updated profile has finished creating. If any Fargate profiles in a cluster are in the DELETING status, you must wait for that Fargate profile to finish deleting before you can create any other profiles in that cluster. For more information, see Fargate profile in the Amazon EKS User Guide. + /// Creates a Fargate profile for your Amazon EKS cluster. You must have at least one Fargate profile in a cluster to be able to run pods on Fargate. The Fargate profile allows an administrator to declare which pods run on Fargate and specify which pods run on which Fargate profile. This declaration is done through the profile's selectors. Each profile can have up to five selectors that contain a namespace and labels. A namespace is required for every selector. The label field consists of multiple optional key-value pairs. Pods that match the selectors are scheduled on Fargate. If a to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is run on Fargate. When you create a Fargate profile, you must specify a pod execution role to use with the pods that are scheduled with the profile. This role is added to the cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the kubelet that is running on the Fargate infrastructure can register with your Amazon EKS cluster so that it can appear in your cluster as a node. The pod execution role also provides IAM permissions to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more information, see Pod Execution Role in the Amazon EKS User Guide. Fargate profiles are immutable. However, you can create a new updated profile to replace an existing profile and then delete the original after the updated profile has finished creating. If any Fargate profiles in a cluster are in the DELETING status, you must wait for that Fargate profile to finish deleting before you can create any other profiles in that cluster. For more information, see Fargate profile in the Amazon EKS User Guide. /// /// Parameters: /// - clientRequestToken: A unique, case-sensitive identifier that you provide to ensure @@ -1048,6 +1050,53 @@ public struct EKS: AWSService { return try await self.describeCluster(input, logger: logger) } + /// Lists available Kubernetes versions for Amazon EKS clusters. + @Sendable + @inlinable + public func describeClusterVersions(_ input: DescribeClusterVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeClusterVersionsResponse { + try await self.client.execute( + operation: "DescribeClusterVersions", + path: "/cluster-versions", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists available Kubernetes versions for Amazon EKS clusters. + /// + /// Parameters: + /// - clusterType: The type of cluster to filter versions by. + /// - clusterVersions: List of specific cluster versions to describe. + /// - defaultOnly: Filter to show only default versions. + /// - includeAll: Include all available versions in the response. + /// - maxResults: Maximum number of results to return. + /// - nextToken: Pagination token for the next set of results.
+ /// - status: Filter versions by their current status. + /// - logger: Logger use during operation + @inlinable + public func describeClusterVersions( + clusterType: String? = nil, + clusterVersions: [String]? = nil, + defaultOnly: Bool? = nil, + includeAll: Bool? = nil, + maxResults: Int? = nil, + nextToken: String? = nil, + status: ClusterVersionStatus? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DescribeClusterVersionsResponse { + let input = DescribeClusterVersionsRequest( + clusterType: clusterType, + clusterVersions: clusterVersions, + defaultOnly: defaultOnly, + includeAll: includeAll, + maxResults: maxResults, + nextToken: nextToken, + status: status + ) + return try await self.describeClusterVersions(input, logger: logger) + } + /// Returns descriptive information about a subscription. @Sendable @inlinable @@ -1976,7 +2025,7 @@ /// - clientRequestToken: A unique, case-sensitive identifier that you provide to ensure /// - clusterName: The name of your cluster. /// - configurationValues: The set of configuration values for the add-on that's created. The values that you provide are validated against the schema returned by DescribeAddonConfiguration. - /// - podIdentityAssociations: An array of Pod Identity Assocations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change. If an empty array is provided, existing Pod Identity Assocations owned by the Addon are deleted. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide. + /// - podIdentityAssociations: An array of Pod Identity Associations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change. If an empty array is provided, existing Pod Identity Associations owned by the Addon are deleted. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide. /// - resolveConflicts: How to resolve field value conflicts for an Amazon EKS add-on if you've changed a value from the Amazon EKS default value. Conflicts are handled based on the option you choose: None – Amazon EKS doesn't change the value. The update might fail. Overwrite – Amazon EKS overwrites the changed value back to the Amazon EKS default value. Preserve – Amazon EKS preserves the value. If you choose this option, we recommend that you test any field and value changes on a non-production cluster before updating the add-on on your production cluster. /// - serviceAccountRoleArn: The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the permissions assigned to the node IAM role. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide. To specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for your cluster. For more information, see Enabling IAM roles for service accounts on your cluster in the Amazon EKS User Guide. /// - logger: Logger use during operation @@ -2025,7 +2074,7 @@ /// - clientRequestToken: A unique, case-sensitive identifier that you provide to ensure /// - computeConfig: Update the configuration of the compute capability of your EKS Auto Mode cluster.
For example, enable the capability. /// - kubernetesNetworkConfig: - /// - logging: Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS cluster control plane logs in the Amazon EKS User Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. + /// - logging: Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS cluster control plane logs in the Amazon EKS User Guide. CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. /// - name: The name of the Amazon EKS cluster to update. /// - resourcesVpcConfig: /// - storageConfig: Update the configuration of the block storage capability of your EKS Auto Mode cluster. For example, enable the capability. @@ -2061,7 +2110,7 @@ - /// Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation. Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active. If your cluster has managed node groups attached to it, all of your node groups’ Kubernetes versions must match the cluster’s Kubernetes version in order to update the cluster to a new Kubernetes version. + /// Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation. Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active. If your cluster has managed node groups attached to it, all of your node groups' Kubernetes versions must match the cluster's Kubernetes version in order to update the cluster to a new Kubernetes version. @Sendable @inlinable public func updateClusterVersion(_ input: UpdateClusterVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateClusterVersionResponse { @@ -2074,7 +2123,7 @@ logger: logger ) } - /// Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation. Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent).
When the update is complete (either Failed or Successful), the cluster status moves to Active. If your cluster has managed node groups attached to it, all of your node groups’ Kubernetes versions must match the cluster’s Kubernetes version in order to update the cluster to a new Kubernetes version. + /// Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation. Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active. If your cluster has managed node groups attached to it, all of your node groups' Kubernetes versions must match the cluster's Kubernetes version in order to update the cluster to a new Kubernetes version. /// /// Parameters: /// - clientRequestToken: A unique, case-sensitive identifier that you provide to ensure @@ -2131,7 +2180,7 @@ public struct EKS: AWSService { return try await self.updateEksAnywhereSubscription(input, logger: logger) } - /// Updates an Amazon EKS managed node group configuration. Your node group continues to function during the update. The response output includes an update ID that you can use to track the status of your node group update with the DescribeUpdate API operation. Currently you can update the Kubernetes labels for a node group or the scaling configuration. + /// Updates an Amazon EKS managed node group configuration. Your node group continues to function during the update. The response output includes an update ID that you can use to track the status of your node group update with the DescribeUpdate API operation. You can update the Kubernetes labels and taints for a node group and the scaling and version update configuration. @Sendable @inlinable public func updateNodegroupConfig(_ input: UpdateNodegroupConfigRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateNodegroupConfigResponse { @@ -2144,7 +2193,7 @@ public struct EKS: AWSService { logger: logger ) } - /// Updates an Amazon EKS managed node group configuration. Your node group continues to function during the update. The response output includes an update ID that you can use to track the status of your node group update with the DescribeUpdate API operation. Currently you can update the Kubernetes labels for a node group or the scaling configuration. + /// Updates an Amazon EKS managed node group configuration. Your node group continues to function during the update. The response output includes an update ID that you can use to track the status of your node group update with the DescribeUpdate API operation. You can update the Kubernetes labels and taints for a node group and the scaling and version update configuration. /// /// Parameters: /// - clientRequestToken: A unique, case-sensitive identifier that you provide to ensure @@ -2329,6 +2378,55 @@ extension EKS { return self.describeAddonVersionsPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``describeClusterVersions(_:logger:)``. 
+ /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func describeClusterVersionsPaginator( + _ input: DescribeClusterVersionsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.describeClusterVersions, + inputKey: \DescribeClusterVersionsRequest.nextToken, + outputKey: \DescribeClusterVersionsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``describeClusterVersions(_:logger:)``. + /// + /// - Parameters: + /// - clusterType: The type of cluster to filter versions by. + /// - clusterVersions: List of specific cluster versions to describe. + /// - defaultOnly: Filter to show only default versions. + /// - includeAll: Include all available versions in the response. + /// - maxResults: Maximum number of results to return. + /// - status: Filter versions by their current status. + /// - logger: Logger used for logging + @inlinable + public func describeClusterVersionsPaginator( + clusterType: String? = nil, + clusterVersions: [String]? = nil, + defaultOnly: Bool? = nil, + includeAll: Bool? = nil, + maxResults: Int? = nil, + status: ClusterVersionStatus? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = DescribeClusterVersionsRequest( + clusterType: clusterType, + clusterVersions: clusterVersions, + defaultOnly: defaultOnly, + includeAll: includeAll, + maxResults: maxResults, + status: status + ) + return self.describeClusterVersionsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listAccessEntries(_:logger:)``. /// /// - Parameters: @@ -2807,6 +2905,21 @@ extension EKS.DescribeAddonVersionsRequest: AWSPaginateToken { } } +extension EKS.DescribeClusterVersionsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> EKS.DescribeClusterVersionsRequest { + return .init( + clusterType: self.clusterType, + clusterVersions: self.clusterVersions, + defaultOnly: self.defaultOnly, + includeAll: self.includeAll, + maxResults: self.maxResults, + nextToken: token, + status: self.status + ) + } +} + extension EKS.ListAccessEntriesRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> EKS.ListAccessEntriesRequest { diff --git a/Sources/Soto/Services/EKS/EKS_shapes.swift b/Sources/Soto/Services/EKS/EKS_shapes.swift index 1d59f812de..7f8ef6520d 100644 --- a/Sources/Soto/Services/EKS/EKS_shapes.swift +++ b/Sources/Soto/Services/EKS/EKS_shapes.swift @@ -130,6 +130,13 @@ extension EKS { public var description: String { return self.rawValue } } + public enum ClusterVersionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case extendedSupport = "extended-support" + case standardSupport = "standard-support" + case unsupported = "unsupported" + public var description: String { return self.rawValue } + } + public enum ConfigStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "ACTIVE" case creating = "CREATING" @@ -282,6 +289,12 @@ extension EKS { public var description: String { return self.rawValue } } + public enum NodegroupUpdateStrategies: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `default` = "DEFAULT" + case minimal = "MINIMAL" + public var description: String { return self.rawValue } + } + public enum ResolveConflicts: String, 
CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case none = "NONE" case overwrite = "OVERWRITE" @@ -334,6 +347,7 @@ extension EKS { case subnets = "Subnets" case taintsToAdd = "TaintsToAdd" case taintsToRemove = "TaintsToRemove" + case updateStrategy = "UpdateStrategy" case upgradePolicy = "UpgradePolicy" case version = "Version" case zonalShiftConfig = "ZonalShiftConfig" @@ -488,7 +502,7 @@ extension EKS { public let modifiedAt: Date? /// The owner of the add-on. public let owner: String? - /// An array of Pod Identity Assocations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide. + /// An array of Pod Identity Associations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide. public let podIdentityAssociations: [String]? /// The publisher of the add-on. public let publisher: String? @@ -537,6 +551,24 @@ extension EKS { } } + public struct AddonCompatibilityDetail: AWSDecodableShape { + /// The list of compatible Amazon EKS add-on versions for the next Kubernetes version. + public let compatibleVersions: [String]? + /// The name of the Amazon EKS add-on. + public let name: String? + + @inlinable + public init(compatibleVersions: [String]? = nil, name: String? = nil) { + self.compatibleVersions = compatibleVersions + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case compatibleVersions = "compatibleVersions" + case name = "name" + } + } + public struct AddonHealth: AWSDecodableShape { /// An object representing the health issues for an add-on. public let issues: [AddonIssue]? @@ -942,7 +974,7 @@ extension EKS { /// A unique, case-sensitive identifier that you provide to ensure /// the idempotency of the request. public let clientRequestToken: String? - /// Indicates the current configuration of the compute capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account. For more information, see EKS Auto Mode compute capability in the EKS User Guide. + /// Indicates the current configuration of the compute capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account. For more information, see EKS Auto Mode compute capability in the Amazon EKS User Guide. public let computeConfig: ComputeConfigResponse? /// The configuration used to connect to a cluster for registration. public let connectorConfig: ConnectorConfigResponse? @@ -976,11 +1008,11 @@ extension EKS { public let roleArn: String? /// The current status of the cluster. public let status: ClusterStatus? - /// Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the EKS User Guide.
+ /// Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the Amazon EKS User Guide. public let storageConfig: StorageConfigResponse? /// Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. public let tags: [String: String]? - /// This value indicates if extended support is enabled or disabled for the cluster. Learn more about EKS Extended Support in the EKS User Guide. + /// This value indicates if extended support is enabled or disabled for the cluster. Learn more about EKS Extended Support in the Amazon EKS User Guide. public let upgradePolicy: UpgradePolicyResponse? /// The Kubernetes server version for the cluster. public let version: String? @@ -1083,6 +1115,52 @@ extension EKS { } } + public struct ClusterVersionInformation: AWSDecodableShape { + /// The type of cluster this version is for. + public let clusterType: String? + /// The Kubernetes version for the cluster. + public let clusterVersion: String? + /// Default platform version for this Kubernetes version. + public let defaultPlatformVersion: String? + /// Indicates if this is a default version. + public let defaultVersion: Bool? + /// Date when extended support ends for this version. + public let endOfExtendedSupportDate: Date? + /// Date when standard support ends for this version. + public let endOfStandardSupportDate: Date? + /// The patch version of Kubernetes for this cluster version. + public let kubernetesPatchVersion: String? + /// The release date of this cluster version. + public let releaseDate: Date? + /// Current status of this cluster version. + public let status: ClusterVersionStatus? + + @inlinable + public init(clusterType: String? = nil, clusterVersion: String? = nil, defaultPlatformVersion: String? = nil, defaultVersion: Bool? = nil, endOfExtendedSupportDate: Date? = nil, endOfStandardSupportDate: Date? = nil, kubernetesPatchVersion: String? = nil, releaseDate: Date? = nil, status: ClusterVersionStatus? = nil) { + self.clusterType = clusterType + self.clusterVersion = clusterVersion + self.defaultPlatformVersion = defaultPlatformVersion + self.defaultVersion = defaultVersion + self.endOfExtendedSupportDate = endOfExtendedSupportDate + self.endOfStandardSupportDate = endOfStandardSupportDate + self.kubernetesPatchVersion = kubernetesPatchVersion + self.releaseDate = releaseDate + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case clusterType = "clusterType" + case clusterVersion = "clusterVersion" + case defaultPlatformVersion = "defaultPlatformVersion" + case defaultVersion = "defaultVersion" + case endOfExtendedSupportDate = "endOfExtendedSupportDate" + case endOfStandardSupportDate = "endOfStandardSupportDate" + case kubernetesPatchVersion = "kubernetesPatchVersion" + case releaseDate = "releaseDate" + case status = "status" + } + } + public struct Compatibility: AWSDecodableShape { /// The supported Kubernetes version of the cluster. public let clusterVersion: String? 
@@ -1108,9 +1186,9 @@ extension EKS { public struct ComputeConfigRequest: AWSEncodableShape { /// Request to enable or disable the compute capability on your EKS Auto Mode cluster. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account. public let enabled: Bool? - /// Configuration for node pools that defines the compute resources for your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the EKS User Guide. + /// Configuration for node pools that defines the compute resources for your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the Amazon EKS User Guide. public let nodePools: [String]? - /// The ARN of the IAM Role EKS will assign to EC2 Managed Instances in your EKS Auto Mode cluster. This value cannot be changed after the compute capability of EKS Auto Mode is enabled. For more information, see the IAM Reference in the EKS User Guide. + /// The ARN of the IAM Role EKS will assign to EC2 Managed Instances in your EKS Auto Mode cluster. This value cannot be changed after the compute capability of EKS Auto Mode is enabled. For more information, see the IAM Reference in the Amazon EKS User Guide. public let nodeRoleArn: String? @inlinable @@ -1130,7 +1208,7 @@ extension EKS { public struct ComputeConfigResponse: AWSDecodableShape { /// Indicates if the compute capability is enabled on your EKS Auto Mode cluster. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account. public let enabled: Bool? - /// Indicates the current configuration of node pools in your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the EKS User Guide. + /// Indicates the current configuration of node pools in your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the Amazon EKS User Guide. public let nodePools: [String]? /// The ARN of the IAM Role EKS will assign to EC2 Managed Instances in your EKS Auto Mode cluster. public let nodeRoleArn: String? @@ -1251,11 +1329,11 @@ extension EKS { public let clusterName: String /// The value for name that you've specified for kind: Group as a subject in a Kubernetes RoleBinding or ClusterRoleBinding object. Amazon EKS doesn't confirm that the value for name exists in any bindings on your cluster. You can specify one or more names. Kubernetes authorizes the principalArn of the access entry to access any cluster objects that you've specified in a Kubernetes Role or ClusterRole object that is also specified in a binding's roleRef. For more information about creating Kubernetes RoleBinding, ClusterRoleBinding, Role, or ClusterRole objects, see Using RBAC Authorization in the Kubernetes documentation. If you want Amazon EKS to authorize the principalArn (instead of, or in addition to Kubernetes authorizing the principalArn), you can associate one or more access policies to the access entry using AssociateAccessPolicy. If you associate any access policies, the principalARN has all permissions assigned in the associated access policies and all permissions in any Kubernetes Role or ClusterRole objects that the group names are bound to. public let kubernetesGroups: [String]? - /// The ARN of the IAM principal for the AccessEntry. You can specify one ARN for each access entry. You can't specify the same ARN in more than one access entry. This value can't be changed after access entry creation. 
The valid principals differ depending on the type of the access entry in the type field. The only valid ARN is IAM roles for the types of access entries for nodes: . You can use every IAM principal type for STANDARD access entries. You can't use the STS session principal type with access entries because this is a temporary principal for each session and not a permanent identity that can be assigned permissions. IAM best practices recommend using IAM roles with temporary credentials, rather than IAM users with long-term credentials. + /// The ARN of the IAM principal for the AccessEntry. You can specify one ARN for each access entry. You can't specify the same ARN in more than one access entry. This value can't be changed after access entry creation. The valid principals differ depending on the type of the access entry in the type field. For STANDARD access entries, you can use every IAM principal type. For nodes (EC2 (for EKS Auto Mode), EC2_LINUX, EC2_WINDOWS, FARGATE_LINUX, and HYBRID_LINUX), the only valid principal is an IAM role. You can't use the STS session principal type with access entries because this is a temporary principal for each session and not a permanent identity that can be assigned permissions. IAM best practices recommend using IAM roles with temporary credentials, rather than IAM users with long-term credentials. public let principalArn: String /// Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. public let tags: [String: String]? - /// The type of the new access entry. Valid values are Standard, FARGATE_LINUX, EC2_LINUX, and EC2_WINDOWS. If the principalArn is for an IAM role that's used for self-managed Amazon EC2 nodes, specify EC2_LINUX or EC2_WINDOWS. Amazon EKS grants the necessary permissions to the node for you. If the principalArn is for any other purpose, specify STANDARD. If you don't specify a value, Amazon EKS sets the value to STANDARD. It's unnecessary to create access entries for IAM roles used with Fargate profiles or managed Amazon EC2 nodes, because Amazon EKS creates entries in the aws-auth ConfigMap for the roles. You can't change this value once you've created the access entry. If you set the value to EC2_LINUX or EC2_WINDOWS, you can't specify values for kubernetesGroups, or associate an AccessPolicy to the access entry. + /// The type of the new access entry. Valid values are STANDARD, FARGATE_LINUX, EC2_LINUX, EC2_WINDOWS, EC2 (for EKS Auto Mode), HYBRID_LINUX, and HYPERPOD_LINUX. If the principalArn is for an IAM role that's used for self-managed Amazon EC2 nodes, specify EC2_LINUX or EC2_WINDOWS. Amazon EKS grants the necessary permissions to the node for you. If the principalArn is for any other purpose, specify STANDARD. If you don't specify a value, Amazon EKS sets the value to STANDARD. If you have the access mode of the cluster set to API_AND_CONFIG_MAP, it's unnecessary to create access entries for IAM roles used with Fargate profiles or managed Amazon EC2 nodes, because Amazon EKS creates entries in the aws-auth ConfigMap for the roles. You can't change this value once you've created the access entry. If you set the value to EC2_LINUX or EC2_WINDOWS, you can't specify values for kubernetesGroups, or associate an AccessPolicy to the access entry. public let type: String? /// The username to authenticate to Kubernetes with.
We recommend not specifying a username and letting Amazon EKS specify it for you. For more information about the value Amazon EKS specifies for you, or constraints before specifying your own username, see Creating access entries in the Amazon EKS User Guide. public let username: String? @@ -1328,7 +1406,7 @@ extension EKS { public let clusterName: String /// The set of configuration values for the add-on that's created. The values that you provide are validated against the schema returned by DescribeAddonConfiguration. public let configurationValues: String? - /// An array of Pod Identity Assocations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide. + /// An array of Pod Identity Associations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide. public let podIdentityAssociations: [AddonPodIdentityAssociations]? /// How to resolve field value conflicts for an Amazon EKS add-on. Conflicts are handled based on the value you choose: None – If the self-managed version of the add-on is installed on your cluster, Amazon EKS doesn't change the value. Creation of the add-on might fail. Overwrite – If the self-managed version of the add-on is installed on your cluster and the Amazon EKS default value is different than the existing value, Amazon EKS changes the value to the Amazon EKS default value. Preserve – This is similar to the NONE option. If the self-managed version of the add-on is installed on your cluster Amazon EKS doesn't change the add-on resource properties. Creation of the add-on might fail if conflicts are detected. This option works differently during the update operation. For more information, see UpdateAddon. If you don't currently have the self-managed version of the add-on installed on your cluster, the Amazon EKS add-on is installed. Amazon EKS sets all values to default values, regardless of the option that you specify. public let resolveConflicts: ResolveConflicts? @@ -1418,7 +1496,7 @@ extension EKS { public let encryptionConfig: [EncryptionConfig]? /// The Kubernetes network configuration for the cluster. public let kubernetesNetworkConfig: KubernetesNetworkConfigRequest? - /// Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. + /// Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide. CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. public let logging: Logging? /// The unique name to give to your cluster. The name can contain only alphanumeric characters (case-sensitive), /// hyphens, and underscores.
It must start with an alphanumeric character and can't be longer than @@ -1441,7 +1519,7 @@ extension EKS { public let upgradePolicy: UpgradePolicyRequest? /// The desired Kubernetes version for your cluster. If you don't specify a value here, the default version available in Amazon EKS is used. The default version might not be the latest version available. public let version: String? - /// Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster. Zonal shift is a feature of Amazon Application Recovery Controller (ARC). ARC zonal shift is designed to be a temporary measure that allows you to move traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel it. You can extend the zonal shift if necessary. You can start a zonal shift for an EKS cluster, or you can allow Amazon Web Services to do it for you by enabling zonal autoshift. This shift updates the flow of east-to-west network traffic in your cluster to only consider network endpoints for Pods running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress traffic for applications in your EKS cluster will automatically route traffic to targets in the healthy AZs. For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC) Zonal Shift in Amazon EKS in the Amazon EKS User Guide . + /// Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster. Zonal shift is a feature of Amazon Application Recovery Controller (ARC). ARC zonal shift is designed to be a temporary measure that allows you to move traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel it. You can extend the zonal shift if necessary. You can start a zonal shift for an Amazon EKS cluster, or you can allow Amazon Web Services to do it for you by enabling zonal autoshift. This shift updates the flow of east-to-west network traffic in your cluster to only consider network endpoints for Pods running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress traffic for applications in your Amazon EKS cluster will automatically route traffic to targets in the healthy AZs. For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC) Zonal Shift in Amazon EKS in the Amazon EKS User Guide. public let zonalShiftConfig: ZonalShiftConfigRequest? @inlinable @@ -2395,6 +2473,71 @@ extension EKS { } } + public struct DescribeClusterVersionsRequest: AWSEncodableShape { + /// The type of cluster to filter versions by. + public let clusterType: String? + /// List of specific cluster versions to describe. + public let clusterVersions: [String]? + /// Filter to show only default versions. + public let defaultOnly: Bool? + /// Include all available versions in the response. + public let includeAll: Bool? + /// Maximum number of results to return. + public let maxResults: Int? + /// Pagination token for the next set of results. + public let nextToken: String? + /// Filter versions by their current status. + public let status: ClusterVersionStatus? + + @inlinable + public init(clusterType: String? = nil, clusterVersions: [String]? = nil, defaultOnly: Bool? = nil, includeAll: Bool? = nil, maxResults: Int? = nil, nextToken: String? = nil, status: ClusterVersionStatus?
= nil) { + self.clusterType = clusterType + self.clusterVersions = clusterVersions + self.defaultOnly = defaultOnly + self.includeAll = includeAll + self.maxResults = maxResults + self.nextToken = nextToken + self.status = status + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.clusterType, key: "clusterType") + request.encodeQuery(self.clusterVersions, key: "clusterVersions") + request.encodeQuery(self.defaultOnly, key: "defaultOnly") + request.encodeQuery(self.includeAll, key: "includeAll") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.status, key: "status") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct DescribeClusterVersionsResponse: AWSDecodableShape { + /// List of cluster version information objects. + public let clusterVersions: [ClusterVersionInformation]? + /// Pagination token for the next set of results. + public let nextToken: String? + + @inlinable + public init(clusterVersions: [ClusterVersionInformation]? = nil, nextToken: String? = nil) { + self.clusterVersions = clusterVersions + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case clusterVersions = "clusterVersions" + case nextToken = "nextToken" + } + } + public struct DescribeEksAnywhereSubscriptionRequest: AWSEncodableShape { /// The ID of the subscription. public let id: String @@ -3065,15 +3208,19 @@ extension EKS { } public struct InsightCategorySpecificSummary: AWSDecodableShape { + /// A list of AddonCompatibilityDetail objects for Amazon EKS add-ons. + public let addonCompatibilityDetails: [AddonCompatibilityDetail]? /// The summary information about deprecated resource usage for an insight check in the UPGRADE_READINESS category. public let deprecationDetails: [DeprecationDetail]? @inlinable - public init(deprecationDetails: [DeprecationDetail]? = nil) { + public init(addonCompatibilityDetails: [AddonCompatibilityDetail]? = nil, deprecationDetails: [DeprecationDetail]? = nil) { + self.addonCompatibilityDetails = addonCompatibilityDetails self.deprecationDetails = deprecationDetails } private enum CodingKeys: String, CodingKey { + case addonCompatibilityDetails = "addonCompatibilityDetails" case deprecationDetails = "deprecationDetails" } } @@ -3183,7 +3330,7 @@ extension EKS { } public struct Issue: AWSDecodableShape { - /// A brief description of the error. AccessDenied: Amazon EKS or one or more of your managed nodes is failing to authenticate or authorize with your Kubernetes cluster API server. AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch instances. AutoScalingGroupNotFound: We couldn't find the Auto Scaling group associated with the managed node group. You may be able to recreate an Auto Scaling group with the same settings to recover. ClusterUnreachable: Amazon EKS or one or more of your managed nodes is unable to to communicate with your Kubernetes cluster API server. This can happen if there are network disruptions or if API servers are timing out processing requests. 
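Since DescribeClusterVersionsRequest pages results through nextToken, the natural way to consume it from Soto is the paginator this diff adds. A small usage sketch, assuming an async context and the eks client from the earlier examples:

// List Kubernetes versions still in standard support; the paginator
// follows nextToken until the service stops returning one.
let pages = eks.describeClusterVersionsPaginator(
    defaultOnly: false,
    status: .standardSupport
)
for try await page in pages {
    for info in page.clusterVersions ?? [] {
        print(info.clusterVersion ?? "unknown", info.status?.description ?? "-")
    }
}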
Ec2InstanceTypeDoesNotExist: One or more of the supplied Amazon EC2 instance types do not exist. Amazon EKS checked for the instance types that you provided in this Amazon Web Services Region, and one or more aren't available. Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch template for your managed node group. You may be able to recreate a launch template with the same settings to recover. Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not match the version that Amazon EKS created. You may be able to revert to the version that Amazon EKS created to recover. Ec2SecurityGroupDeletionFailure: We could not delete the remote access security group for your managed node group. Remove any dependencies from the security group. Ec2SecurityGroupNotFound: We couldn't find the cluster security group for the cluster. You must recreate your cluster. Ec2SubnetInvalidConfiguration: One or more Amazon EC2 subnets specified for a node group do not automatically assign public IP addresses to instances launched into it. If you want your instances to be assigned a public IP address, then you need to enable the auto-assign public IP address setting for the subnet. See Modifying the public IPv4 addressing attribute for your subnet in the Amazon VPC User Guide. IamInstanceProfileNotFound: We couldn't find the IAM instance profile for your managed node group. You may be able to recreate an instance profile with the same settings to recover. IamNodeRoleNotFound: We couldn't find the IAM role for your managed node group. You may be able to recreate an IAM role with the same settings to recover. InstanceLimitExceeded: Your Amazon Web Services account is unable to launch any more instances of the specified instance type. You may be able to request an Amazon EC2 instance limit increase to recover. InsufficientFreeAddresses: One or more of the subnets associated with your managed node group does not have enough available IP addresses for new nodes. InternalFailure: These errors are usually caused by an Amazon EKS server-side issue. NodeCreationFailure: Your launched instances are unable to register with your Amazon EKS cluster. Common causes of this failure are insufficient node IAM role permissions or lack of outbound internet access for the nodes. + /// A brief description of the error. AccessDenied: Amazon EKS or one or more of your managed nodes is failing to authenticate or authorize with your Kubernetes cluster API server. AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch instances. AutoScalingGroupNotFound: We couldn't find the Auto Scaling group associated with the managed node group. You may be able to recreate an Auto Scaling group with the same settings to recover. ClusterUnreachable: Amazon EKS or one or more of your managed nodes is unable to communicate with your Kubernetes cluster API server. This can happen if there are network disruptions or if API servers are timing out processing requests. Ec2InstanceTypeDoesNotExist: One or more of the supplied Amazon EC2 instance types do not exist. Amazon EKS checked for the instance types that you provided in this Amazon Web Services Region, and one or more aren't available. Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch template for your managed node group. You may be able to recreate a launch template with the same settings to recover.
Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not match the version that Amazon EKS created. You may be able to revert to the version that Amazon EKS created to recover. Ec2SecurityGroupDeletionFailure: We could not delete the remote access security group for your managed node group. Remove any dependencies from the security group. Ec2SecurityGroupNotFound: We couldn't find the cluster security group for the cluster. You must recreate your cluster. Ec2SubnetInvalidConfiguration: One or more Amazon EC2 subnets specified for a node group do not automatically assign public IP addresses to instances launched into it. If you want your instances to be assigned a public IP address, then you need to enable the auto-assign public IP address setting for the subnet. See Modifying the public IPv4 addressing attribute for your subnet in the Amazon VPC User Guide. IamInstanceProfileNotFound: We couldn't find the IAM instance profile for your managed node group. You may be able to recreate an instance profile with the same settings to recover. IamNodeRoleNotFound: We couldn't find the IAM role for your managed node group. You may be able to recreate an IAM role with the same settings to recover. InstanceLimitExceeded: Your Amazon Web Services account is unable to launch any more instances of the specified instance type. You may be able to request an Amazon EC2 instance limit increase to recover. InsufficientFreeAddresses: One or more of the subnets associated with your managed node group does not have enough available IP addresses for new nodes. InternalFailure: These errors are usually caused by an Amazon EKS server-side issue. NodeCreationFailure: Your launched instances are unable to register with your Amazon EKS cluster. Common causes of this failure are insufficient node IAM role permissions or lack of outbound internet access for the nodes. public let code: NodegroupIssueCode? /// The error message associated with the issue. public let message: String? @@ -3205,7 +3352,7 @@ extension EKS { } public struct KubernetesNetworkConfigRequest: AWSEncodableShape { - /// Request to enable or disable the load balancing capability on your EKS Auto Mode cluster. For more information, see EKS Auto Mode load balancing capability in the EKS User Guide. + /// Request to enable or disable the load balancing capability on your EKS Auto Mode cluster. For more information, see EKS Auto Mode load balancing capability in the Amazon EKS User Guide. public let elasticLoadBalancing: ElasticLoadBalancing? /// Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you don't specify a value, ipv4 is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify ipv6, the VPC and subnets that you specify for cluster creation must have both IPv4 and IPv6 CIDR blocks assigned to them. You can't specify ipv6 for clusters in China Regions. You can only specify ipv6 for 1.21 and later clusters that use version 1.10.1 or later of the Amazon VPC CNI add-on. If you specify ipv6, then ensure that your VPC meets the requirements listed in the considerations listed in Assigning IPv6 addresses to pods and services in the Amazon EKS User Guide. Kubernetes assigns services IPv6 addresses from the unique local address range (fc00::/7). You can't specify a custom IPv6 CIDR block. Pod addresses are assigned from the subnet's IPv6 CIDR. public let ipFamily: IpFamily? 
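The ipFamily constraint described above (set once, at cluster creation) sits on the same shape as the new elasticLoadBalancing flag. A hypothetical request value, suitable for passing as kubernetesNetworkConfig to createCluster:

// ipFamily can only be set when the cluster is created; elasticLoadBalancing
// enables the EKS Auto Mode load balancing capability.
let netConfig = EKS.KubernetesNetworkConfigRequest(
    elasticLoadBalancing: EKS.ElasticLoadBalancing(enabled: true),
    ipFamily: .ipv6
)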
@@ -3517,7 +3664,7 @@ extension EKS { } public struct ListClustersResponse: AWSDecodableShape { - /// A list of all of the clusters for your account in the specified Amazon Web Services Region. + /// A list of all of the clusters for your account in the specified Amazon Web Services Region. public let clusters: [String]? /// The nextToken value returned from a previous paginated request, where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return. This token should be treated as an opaque identifier that is used only to retrieve the next items in a list and not for other programmatic purposes. public let nextToken: String? @@ -3934,7 +4081,7 @@ } public struct LogSetup: AWSEncodableShape & AWSDecodableShape { - /// If a log type is enabled, that log type exports its control plane logs to CloudWatch Logs. If a log type isn't enabled, that log type doesn't export its control plane logs. Each individual log type can be enabled or disabled independently. + /// If a log type is enabled, that log type exports its control plane logs to CloudWatch Logs. If a log type isn't enabled, that log type doesn't export its control plane logs. Each individual log type can be enabled or disabled independently. public let enabled: Bool? /// The available cluster control plane log types. public let types: [LogType]? @@ -4136,9 +4283,9 @@ } public struct NodegroupScalingConfig: AWSEncodableShape & AWSDecodableShape { - /// The current number of nodes that the managed node group should maintain. If you use the Kubernetes Cluster Autoscaler, you shouldn't change the desiredSize value directly, as this can cause the Cluster Autoscaler to suddenly scale up or scale down. Whenever this parameter changes, the number of worker nodes in the node group is updated to the specified size. If this parameter is given a value that is smaller than the current number of running worker nodes, the necessary number of worker nodes are terminated to match the given value. When using CloudFormation, no action occurs if you remove this parameter from your CFN template.
This parameter can be different from minSize in some cases, such as when starting with extra hosts for testing. This parameter can also be different when you want to start with an estimated number of needed hosts, but let the Cluster Autoscaler reduce the number if there are too many. When the Cluster Autoscaler is used, the desiredSize parameter is altered by the Cluster Autoscaler (but can be out-of-date for short periods of time). The Cluster Autoscaler doesn't scale a managed node group lower than minSize or higher than maxSize. public let desiredSize: Int? - /// The maximum number of nodes that the managed node group can scale out to. For information about the maximum number that you can specify, see Amazon EKS service quotas in the Amazon EKS User Guide. + /// The maximum number of nodes that the managed node group can scale out to. For information about the maximum number that you can specify, see Amazon EKS service quotas in the Amazon EKS User Guide. public let maxSize: Int? /// The minimum number of nodes that the managed node group can scale in to. public let minSize: Int? @@ -4168,11 +4315,14 @@ extension EKS { public let maxUnavailable: Int? /// The maximum percentage of nodes unavailable during a version update. This percentage of nodes are updated in parallel, up to 100 nodes at once. This value or maxUnavailable is required to have a value. public let maxUnavailablePercentage: Int? + /// The configuration for the behavior to follow during a node group version update of this managed node group. You choose between two possible strategies for replacing nodes during an UpdateNodegroupVersion action. An Amazon EKS managed node group updates by replacing nodes with new nodes of newer AMI versions in parallel. The update strategy changes the managed node update behavior of the managed node group for each quantity. The default strategy has guardrails to protect you from misconfiguration and launches the new instances first, before terminating the old instances. The minimal strategy removes the guardrails and terminates the old instances before launching the new instances. This minimal strategy is useful in scenarios where you are constrained to resources or costs (for example, with hardware accelerators such as GPUs). + public let updateStrategy: NodegroupUpdateStrategies? @inlinable - public init(maxUnavailable: Int? = nil, maxUnavailablePercentage: Int? = nil) { + public init(maxUnavailable: Int? = nil, maxUnavailablePercentage: Int? = nil, updateStrategy: NodegroupUpdateStrategies? = nil) { self.maxUnavailable = maxUnavailable self.maxUnavailablePercentage = maxUnavailablePercentage + self.updateStrategy = updateStrategy } public func validate(name: String) throws { @@ -4184,6 +4334,7 @@ extension EKS { private enum CodingKeys: String, CodingKey { case maxUnavailable = "maxUnavailable" case maxUnavailablePercentage = "maxUnavailablePercentage" + case updateStrategy = "updateStrategy" } } @@ -4834,7 +4985,7 @@ extension EKS { public let clusterName: String /// The set of configuration values for the add-on that's created. The values that you provide are validated against the schema returned by DescribeAddonConfiguration. public let configurationValues: String? - /// An array of Pod Identity Assocations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change. If an empty array is provided, existing Pod Identity Assocations owned by the Addon are deleted.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide. + /// An array of Pod Identity Associations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change. If an empty array is provided, existing Pod Identity Associations owned by the Addon are deleted. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide. public let podIdentityAssociations: [AddonPodIdentityAssociations]? /// How to resolve field value conflicts for an Amazon EKS add-on if you've changed a value from the Amazon EKS default value. Conflicts are handled based on the option you choose: None – Amazon EKS doesn't change the value. The update might fail. Overwrite – Amazon EKS overwrites the changed value back to the Amazon EKS default value. Preserve – Amazon EKS preserves the value. If you choose this option, we recommend that you test any field and value changes on a non-production cluster before updating the add-on on your production cluster. public let resolveConflicts: ResolveConflicts? @@ -4906,7 +5057,7 @@ extension EKS { /// Update the configuration of the compute capability of your EKS Auto Mode cluster. For example, enable the capability. public let computeConfig: ComputeConfigRequest? public let kubernetesNetworkConfig: KubernetesNetworkConfigRequest? - /// Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS cluster control plane logs in the Amazon EKS User Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. + /// Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS cluster control plane logs in the Amazon EKS User Guide. CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. public let logging: Logging? /// The name of the Amazon EKS cluster to update. public let name: String @@ -5317,7 +5468,7 @@ extension EKS { } public struct UpgradePolicyRequest: AWSEncodableShape { - /// If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support. Learn more about EKS Extended Support in the EKS User Guide. + /// If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support. Learn more about EKS Extended Support in the Amazon EKS User Guide. public let supportType: SupportType? @inlinable @@ -5331,7 +5482,7 @@ extension EKS { } public struct UpgradePolicyResponse: AWSDecodableShape { - /// If the cluster is set to EXTENDED, it will enter extended support at the end of standard support.
If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support. Learn more about EKS Extended Support in the Amazon EKS User Guide. public let supportType: SupportType? @inlinable @@ -5349,7 +5500,7 @@ extension EKS { public let endpointPrivateAccess: Bool? /// Set this value to false to disable public access to your cluster's Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is true, which enables public access for your Kubernetes API server. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide . public let endpointPublicAccess: Bool? - /// The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access, make sure that you specify the necessary CIDR blocks for every node and Fargate Pod in the cluster. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide . + /// The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access, make sure that you specify the necessary CIDR blocks for every node and Fargate Pod in the cluster. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide. public let publicAccessCidrs: [String]? /// Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use that allow communication between your nodes and the Kubernetes control plane. If you don't specify any security groups, then familiarize yourself with the difference between Amazon EKS defaults for clusters deployed with Kubernetes. For more information, see Amazon EKS security group considerations in the Amazon EKS User Guide . public let securityGroupIds: [String]?
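The updateStrategy field added to NodegroupUpdateConfig in the EKS hunk above selects between the documented default and minimal node-replacement behaviors. A hedged sketch, assuming the NodegroupUpdateStrategies cases mirror the documented strategy names (the enum itself isn't shown in this diff):

```swift
import SotoEKS

// Minimal strategy: terminate old nodes before launching replacements,
// useful when capacity or cost is tight (for example, GPU instances).
// `.minimal` is an assumed case name based on the doc text above.
let updateConfig = EKS.NodegroupUpdateConfig(
    maxUnavailable: 1, // at most one node offline at a time
    updateStrategy: .minimal
)
```

This value would then be passed as the update configuration of an UpdateNodegroupVersion request.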
diff --git a/Sources/Soto/Services/EKSAuth/EKSAuth_api.swift b/Sources/Soto/Services/EKSAuth/EKSAuth_api.swift index ec1d9d3657..4d973ac301 100644 --- a/Sources/Soto/Services/EKSAuth/EKSAuth_api.swift +++ b/Sources/Soto/Services/EKSAuth/EKSAuth_api.swift @@ -90,6 +90,7 @@ public struct EKSAuth: AWSService { "ap-southeast-3": "eks-auth.ap-southeast-3.api.aws", "ap-southeast-4": "eks-auth.ap-southeast-4.api.aws", "ap-southeast-5": "eks-auth.ap-southeast-5.api.aws", + "ap-southeast-7": "eks-auth.ap-southeast-7.api.aws", "ca-central-1": "eks-auth.ca-central-1.api.aws", "ca-west-1": "eks-auth.ca-west-1.api.aws", "cn-north-1": "eks-auth.cn-north-1.api.amazonwebservices.com.cn", @@ -105,6 +106,7 @@ public struct EKSAuth: AWSService { "il-central-1": "eks-auth.il-central-1.api.aws", "me-central-1": "eks-auth.me-central-1.api.aws", "me-south-1": "eks-auth.me-south-1.api.aws", + "mx-central-1": "eks-auth.mx-central-1.api.aws", "sa-east-1": "eks-auth.sa-east-1.api.aws", "us-east-1": "eks-auth.us-east-1.api.aws", "us-east-2": "eks-auth.us-east-2.api.aws", @@ -130,6 +132,7 @@ public struct EKSAuth: AWSService { "ap-southeast-3": "eks-auth-fips.ap-southeast-3.api.aws", "ap-southeast-4": "eks-auth-fips.ap-southeast-4.api.aws", "ap-southeast-5": "eks-auth-fips.ap-southeast-5.api.aws", + "ap-southeast-7": "eks-auth-fips.ap-southeast-7.api.aws", "ca-central-1": "eks-auth-fips.ca-central-1.api.aws", "ca-west-1": "eks-auth-fips.ca-west-1.api.aws", "cn-north-1": "eks-auth-fips.cn-north-1.api.amazonwebservices.com.cn", @@ -145,6 +148,7 @@ public struct EKSAuth: AWSService { "il-central-1": "eks-auth-fips.il-central-1.api.aws", "me-central-1": "eks-auth-fips.me-central-1.api.aws", "me-south-1": "eks-auth-fips.me-south-1.api.aws", + "mx-central-1": "eks-auth-fips.mx-central-1.api.aws", "sa-east-1": "eks-auth-fips.sa-east-1.api.aws", "us-east-1": "eks-auth-fips.us-east-1.api.aws", "us-east-2": "eks-auth-fips.us-east-2.api.aws", diff --git a/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift b/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift index 69341fd79d..49a2a75e77 100644 --- a/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift +++ b/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift @@ -1616,7 +1616,7 @@ extension EMRServerless { } public func validate(name: String) throws { - try self.validate(self.entryPoint, name: "entryPoint", parent: name, max: 256) + try self.validate(self.entryPoint, name: "entryPoint", parent: name, max: 4096) try self.validate(self.entryPoint, name: "entryPoint", parent: name, min: 1) try self.validate(self.entryPoint, name: "entryPoint", parent: name, pattern: ".*\\S.*") try self.entryPointArguments?.forEach { diff --git a/Sources/Soto/Services/ElasticsearchService/ElasticsearchService_api.swift b/Sources/Soto/Services/ElasticsearchService/ElasticsearchService_api.swift index 432a14e631..5e2ac7ad77 100644 --- a/Sources/Soto/Services/ElasticsearchService/ElasticsearchService_api.swift +++ b/Sources/Soto/Services/ElasticsearchService/ElasticsearchService_api.swift @@ -93,6 +93,7 @@ public struct ElasticsearchService: AWSService { "ap-southeast-3": "aos.ap-southeast-3.api.aws", "ap-southeast-4": "aos.ap-southeast-4.api.aws", "ap-southeast-5": "aos.ap-southeast-5.api.aws", + "ap-southeast-7": "aos.ap-southeast-7.api.aws", "ca-central-1": "aos.ca-central-1.api.aws", "ca-west-1": "aos.ca-west-1.api.aws", "cn-north-1": "aos.cn-north-1.api.amazonwebservices.com.cn", @@ -108,6 +109,7 @@ public struct ElasticsearchService: AWSService { 
"il-central-1": "aos.il-central-1.api.aws", "me-central-1": "aos.me-central-1.api.aws", "me-south-1": "aos.me-south-1.api.aws", + "mx-central-1": "aos.mx-central-1.api.aws", "sa-east-1": "aos.sa-east-1.api.aws", "us-east-1": "aos.us-east-1.api.aws", "us-east-2": "aos.us-east-2.api.aws", diff --git a/Sources/Soto/Services/FMS/FMS_shapes.swift b/Sources/Soto/Services/FMS/FMS_shapes.swift index dfeeb938f1..b1532d3e01 100644 --- a/Sources/Soto/Services/FMS/FMS_shapes.swift +++ b/Sources/Soto/Services/FMS/FMS_shapes.swift @@ -136,6 +136,12 @@ extension FMS { public var description: String { return self.rawValue } } + public enum ResourceTagLogicalOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case and = "AND" + case or = "OR" + public var description: String { return self.rawValue } + } + public enum RuleOrder: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case defaultActionOrder = "DEFAULT_ACTION_ORDER" case strictOrder = "STRICT_ORDER" @@ -3050,6 +3056,8 @@ extension FMS { public let remediationEnabled: Bool /// The unique identifiers of the resource sets used by the policy. public let resourceSetIds: [String]? + /// Specifies whether to combine multiple resource tags with AND, so that a resource must have all tags to be included or excluded, or OR, so that a resource must have at least one tag. Default: AND + public let resourceTagLogicalOperator: ResourceTagLogicalOperator? /// An array of ResourceTag objects. public let resourceTags: [ResourceTag]? /// The type of resource protected by or in scope of the policy. This is in the format shown in the Amazon Web Services Resource Types Reference. To apply this policy to multiple resource types, specify a resource type of ResourceTypeList and then specify the resource types in a ResourceTypeList. The following are valid resource types for each Firewall Manager policy type: Amazon Web Services WAF Classic - AWS::ApiGateway::Stage, AWS::CloudFront::Distribution, and AWS::ElasticLoadBalancingV2::LoadBalancer. WAF - AWS::ApiGateway::Stage, AWS::ElasticLoadBalancingV2::LoadBalancer, and AWS::CloudFront::Distribution. Shield Advanced - AWS::ElasticLoadBalancingV2::LoadBalancer, AWS::ElasticLoadBalancing::LoadBalancer, AWS::EC2::EIP, and AWS::CloudFront::Distribution. Network ACL - AWS::EC2::Subnet. Security group usage audit - AWS::EC2::SecurityGroup. Security group content audit - AWS::EC2::SecurityGroup, AWS::EC2::NetworkInterface, and AWS::EC2::Instance. DNS Firewall, Network Firewall, and third-party firewall - AWS::EC2::VPC. @@ -3060,7 +3068,7 @@ extension FMS { public let securityServicePolicyData: SecurityServicePolicyData @inlinable - public init(deleteUnusedFMManagedResources: Bool? = nil, excludeMap: [CustomerPolicyScopeIdType: [String]]? = nil, excludeResourceTags: Bool, includeMap: [CustomerPolicyScopeIdType: [String]]? = nil, policyDescription: String? = nil, policyId: String? = nil, policyName: String, policyStatus: CustomerPolicyStatus? = nil, policyUpdateToken: String? = nil, remediationEnabled: Bool, resourceSetIds: [String]? = nil, resourceTags: [ResourceTag]? = nil, resourceType: String, resourceTypeList: [String]? = nil, securityServicePolicyData: SecurityServicePolicyData) { + public init(deleteUnusedFMManagedResources: Bool? = nil, excludeMap: [CustomerPolicyScopeIdType: [String]]? = nil, excludeResourceTags: Bool, includeMap: [CustomerPolicyScopeIdType: [String]]? = nil, policyDescription: String? = nil, policyId: String? 
= nil, policyName: String, policyStatus: CustomerPolicyStatus? = nil, policyUpdateToken: String? = nil, remediationEnabled: Bool, resourceSetIds: [String]? = nil, resourceTagLogicalOperator: ResourceTagLogicalOperator? = nil, resourceTags: [ResourceTag]? = nil, resourceType: String, resourceTypeList: [String]? = nil, securityServicePolicyData: SecurityServicePolicyData) { self.deleteUnusedFMManagedResources = deleteUnusedFMManagedResources self.excludeMap = excludeMap self.excludeResourceTags = excludeResourceTags @@ -3072,6 +3080,7 @@ extension FMS { self.policyUpdateToken = policyUpdateToken self.remediationEnabled = remediationEnabled self.resourceSetIds = resourceSetIds + self.resourceTagLogicalOperator = resourceTagLogicalOperator self.resourceTags = resourceTags self.resourceType = resourceType self.resourceTypeList = resourceTypeList @@ -3122,6 +3131,7 @@ extension FMS { case policyUpdateToken = "PolicyUpdateToken" case remediationEnabled = "RemediationEnabled" case resourceSetIds = "ResourceSetIds" + case resourceTagLogicalOperator = "ResourceTagLogicalOperator" case resourceTags = "ResourceTags" case resourceType = "ResourceType" case resourceTypeList = "ResourceTypeList" @@ -3902,9 +3912,9 @@ extension FMS { public func validate(name: String) throws { try self.validate(self.key, name: "key", parent: name, max: 128) try self.validate(self.key, name: "key", parent: name, min: 1) - try self.validate(self.key, name: "key", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") + try self.validate(self.key, name: "key", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@*\\\\]*)$") try self.validate(self.value, name: "value", parent: name, max: 256) - try self.validate(self.value, name: "value", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") + try self.validate(self.value, name: "value", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@*\\\\]*)$") } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/GameLift/GameLift_api.swift b/Sources/Soto/Services/GameLift/GameLift_api.swift index 94af390e1d..6fe4bf0aed 100644 --- a/Sources/Soto/Services/GameLift/GameLift_api.swift +++ b/Sources/Soto/Services/GameLift/GameLift_api.swift @@ -192,7 +192,7 @@ public struct GameLift: AWSService { return try await self.createAlias(input, logger: logger) } - /// Creates an Amazon GameLift build resource for your game server software and stores the software for deployment to hosting resources. Combine game server binaries and dependencies into a single .zip file Use the CLI command upload-build to quickly and simply create a new build and upload your game build .zip file to Amazon GameLift Amazon S3. This helper command eliminates the need to explicitly manage access permissions. Alternatively, use the CreateBuild action for the following scenarios: You want to create a build and upload a game build zip file from in an Amazon S3 location that you control. In this scenario, you need to give Amazon GameLift permission to access to the Amazon S3 bucket. With permission in place, call CreateBuild and specify a build name, the build's runtime operating system, and the Amazon S3 storage location where the build file is stored. You want to create a build and upload a local game build zip file to an Amazon S3 location that's controlled by Amazon GameLift. (See the upload-build CLI command for this scenario.) In this scenario, you need to request temporary access credentials to the Amazon GameLift Amazon S3 location. 
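Stepping back to the FMS Policy change above: resourceTagLogicalOperator switches tag scoping from the AND default (a resource must carry every listed tag) to OR (any one tag suffices). A hedged sketch of a Policy using it; apart from the new parameter, the ResourceTag and SecurityServicePolicyData initializers here are illustrative assumptions, not shown in this diff:

```swift
import SotoFMS

// Scope the policy to resources carrying *either* tag, not both.
let policy = FMS.Policy(
    excludeResourceTags: false,     // tags select resources *in* scope
    policyName: "waf-tagged-albs",  // placeholder name
    remediationEnabled: true,
    resourceTagLogicalOperator: .or,
    resourceTags: [
        .init(key: "env", value: "prod"),
        .init(key: "env", value: "staging")
    ],
    resourceType: "AWS::ElasticLoadBalancingV2::LoadBalancer",
    securityServicePolicyData: .init(type: .wafv2) // illustrative only
)
```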
Specify a build name and the build's runtime operating system. The response provides an Amazon S3 location and a set of temporary access credentials. Use the credentials to upload your build files to the specified Amazon S3 location (see Uploading Objects in the Amazon S3 Developer Guide). You can't update build files after uploading them to Amazon GameLift Amazon S3. If successful, this action creates a new build resource with a unique build ID and places it in INITIALIZED status. When the build reaches READY status, you can create fleets with it. Learn more Uploading Your Game Create a Build with Files in Amazon S3 All APIs by task + /// Creates a new Amazon GameLift build resource for your game server binary files. Combine game server binaries into a zip file for use with Amazon GameLift. When setting up a new game build for Amazon GameLift, we recommend using the CLI command upload-build. This helper command combines two tasks: (1) it uploads your build files from a file directory to an Amazon GameLift Amazon S3 location, and (2) it creates a new build resource. You can use the CreateBuild operation in the following scenarios: Create a new game build with build files that are in an Amazon S3 location under an Amazon Web Services account that you control. To use this option, you give Amazon GameLift access to the Amazon S3 bucket. With permissions in place, specify a build name, operating system, and the Amazon S3 storage location of your game build. Upload your build files to an Amazon GameLift Amazon S3 location. To use this option, specify a build name and operating system. This operation creates a new build resource and also returns an Amazon S3 location with temporary access credentials. Use the credentials to manually upload your build files to the specified Amazon S3 location. For more information, see Uploading Objects in the Amazon S3 Developer Guide. After you upload build files to the Amazon GameLift Amazon S3 location, you can't update them. If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it. Learn more Uploading Your Game Create a Build with Files in Amazon S3 All APIs by task @Sendable @inlinable public func createBuild(_ input: CreateBuildInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateBuildOutput { @@ -205,11 +205,11 @@ public struct GameLift: AWSService { logger: logger ) } - /// Creates an Amazon GameLift build resource for your game server software and stores the software for deployment to hosting resources. Combine game server binaries and dependencies into a single .zip file Use the CLI command upload-build to quickly and simply create a new build and upload your game build .zip file to Amazon GameLift Amazon S3. This helper command eliminates the need to explicitly manage access permissions. Alternatively, use the CreateBuild action for the following scenarios: You want to create a build and upload a game build zip file from in an Amazon S3 location that you control. In this scenario, you need to give Amazon GameLift permission to access to the Amazon S3 bucket. With permission in place, call CreateBuild and specify a build name, the build's runtime operating system, and the Amazon S3 storage location where the build file is stored. You want to create a build and upload a local game build zip file to an Amazon S3 location that's controlled by Amazon GameLift.
(See the upload-build CLI command for this scenario.) In this scenario, you need to request temporary access credentials to the Amazon GameLift Amazon S3 location. Specify a build name and the build's runtime operating system. The response provides an Amazon S3 location and a set of temporary access credentials. Use the credentials to upload your build files to the specified Amazon S3 location (see Uploading Objects in the Amazon S3 Developer Guide). You can't update build files after uploading them to Amazon GameLift Amazon S3. If successful, this action creates a new build resource with a unique build ID and places it in INITIALIZED status. When the build reaches READY status, you can create fleets with it. Learn more Uploading Your Game Create a Build with Files in Amazon S3 All APIs by task + /// Creates a new Amazon GameLift build resource for your game server binary files. Combine game server binaries into a zip file for use with Amazon GameLift. When setting up a new game build for Amazon GameLift, we recommend using the CLI command upload-build. This helper command combines two tasks: (1) it uploads your build files from a file directory to an Amazon GameLift Amazon S3 location, and (2) it creates a new build resource. You can use the CreateBuild operation in the following scenarios: Create a new game build with build files that are in an Amazon S3 location under an Amazon Web Services account that you control. To use this option, you give Amazon GameLift access to the Amazon S3 bucket. With permissions in place, specify a build name, operating system, and the Amazon S3 storage location of your game build. Upload your build files to an Amazon GameLift Amazon S3 location. To use this option, specify a build name and operating system. This operation creates a new build resource and also returns an Amazon S3 location with temporary access credentials. Use the credentials to manually upload your build files to the specified Amazon S3 location. For more information, see Uploading Objects in the Amazon S3 Developer Guide. After you upload build files to the Amazon GameLift Amazon S3 location, you can't update them. If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it. Learn more Uploading Your Game Create a Build with Files in Amazon S3 All APIs by task /// /// Parameters: /// - name: A descriptive label that is associated with a build. Build names do not need to be unique. You can change this value later. - /// - operatingSystem: The environment that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. This parameter is required, and there's no default value. You can't change a build's operating system later. Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5. + /// - operatingSystem: The operating system that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system.
You must specify a valid operating system in this request. There is no default value. You can't change a build's operating system later. Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5. /// - serverSdkVersion: A server SDK version you used when integrating your game server build with Amazon GameLift. For more information see Integrate games with custom game servers. By default Amazon GameLift sets this value to 4.0.2. /// - storageLocation: Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an Amazon S3 bucket that you own. The storage location must specify an Amazon S3 bucket name and key. The location must also specify a role ARN that you set up to allow Amazon GameLift to access your Amazon S3 bucket. The S3 bucket and your new build must be in the same Region. If a StorageLocation is specified, the size of your file can be found in your Amazon S3 bucket. Amazon GameLift will report a SizeOnDisk of 0. /// - tags: A list of labels to assign to the new build resource. Tags are developer defined key-value pairs. Tagging Amazon Web Services resources are useful for resource management, access management and cost allocation. For more information, see Tagging Amazon Web Services Resources in the Amazon Web Services General Reference. Once the resource is created, you can use TagResource, UntagResource, and ListTagsForResource to add, remove, and view tags. The maximum tag limit may be lower than stated. See the Amazon Web Services General Reference for actual tagging limits. @@ -255,7 +255,7 @@ public struct GameLift: AWSService { /// - billingType: Indicates whether to use On-Demand or Spot instances for this fleet. Learn more about when to use On-Demand versus Spot Instances. This fleet property can't be changed after the fleet is created. By default, this property is set to ON_DEMAND. You can't update this fleet property later. /// - description: A meaningful description of the container fleet. /// - fleetRoleArn: The unique identifier for an Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift. Use an IAM service role with the GameLiftContainerFleetPolicy managed policy attached. For more information, see Set up an IAM service role. You can't change this fleet property after the fleet is created. IAM role ARN values use the following pattern: arn:aws:iam::[Amazon Web Services account]:role/[role name]. - /// - gameServerContainerGroupDefinitionName: A container group definition resource that describes how to deploy containers with your game server build and support software onto each fleet instance. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number. Create a container group definition by calling CreateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource. + /// - gameServerContainerGroupDefinitionName: A container group definition resource that describes how to deploy containers with your game server build and support software onto each fleet instance. You can specify the container group definition's name to use the latest version. 
Alternatively, provide an ARN value with a specific version number. Create a container group definition by calling CreateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource. /// - gameServerContainerGroupsPerInstance: The number of times to replicate the game server container group on each fleet instance. By default, Amazon GameLift calculates the maximum number of game server container groups that can fit on each instance. This calculation is based on the CPU and memory resources of the fleet's instance type. To use the calculated maximum, don't set this parameter. If you set this number manually, Amazon GameLift uses your value as long as it's less than the calculated maximum. /// - gameSessionCreationLimitPolicy: A policy that limits the number of game sessions that each individual player can create on instances in this fleet. The limit applies for a specified span of time. /// - instanceConnectionPortRange: The set of port numbers to open on each fleet instance. A fleet's connection ports map to container ports that are configured in the fleet's container group definitions. By default, Amazon GameLift calculates an optimal port range based on your fleet configuration. To use the calculated range, don't set this parameter. The values are: Port range: 4192 to a number calculated based on your fleet configuration. Amazon GameLift uses the following formula: 4192 + [# of game server container groups per fleet instance] * [# of container ports in the game server container group definition] + [# of container ports in the game server container group definition] You can also choose to manually set this parameter. When manually setting this parameter, you must use port numbers that match the fleet's inbound permissions port range. If you set values manually, Amazon GameLift no longer calculates a port range for you, even if you later remove the manual settings. @@ -265,7 +265,7 @@ public struct GameLift: AWSService { /// - logConfiguration: A method for collecting container logs for the fleet. Amazon GameLift saves all standard output for each container in logs, including game session logs. You can select from the following methods: CLOUDWATCH -- Send logs to an Amazon CloudWatch log group that you define. Each container emits a log stream, which is organized in the log group. S3 -- Store logs in an Amazon S3 bucket that you define. NONE -- Don't collect container logs. By default, this property is set to CLOUDWATCH. Amazon GameLift requires permissions to send logs to other Amazon Web Services services in your account. These permissions are included in the IAM fleet role for this container fleet (see FleetRoleArn). /// - metricGroups: The name of an Amazon Web Services CloudWatch metric group to add this fleet to. You can use a metric group to aggregate metrics for multiple fleets. You can specify an existing metric group name or use a new name to create a new metric group. Each fleet can have only one metric group, but you can change this value at any time. /// - newGameSessionProtectionPolicy: Determines whether Amazon GameLift can shut down game sessions on the fleet that are actively running and hosting players. Amazon GameLift might prompt an instance shutdown when scaling down fleet capacity or when retiring unhealthy instances. You can also set game session protection for individual game sessions using UpdateGameSession. NoProtection -- Game sessions can be shut down during active gameplay.
FullProtection -- Game sessions in ACTIVE status can't be shut down. By default, this property is set to NoProtection. - /// - perInstanceContainerGroupDefinitionName: The name of a container group definition resource that describes a set of axillary software. A fleet instance has one process for executables in this container group. A per-instance container group is optional. You can update the fleet to add or remove a per-instance container group at any time. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number. Create a container group definition by calling CreateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource. + /// - perInstanceContainerGroupDefinitionName: The name of a container group definition resource that describes a set of auxiliary software. A fleet instance has one process for executables in this container group. A per-instance container group is optional. You can update the fleet to add or remove a per-instance container group at any time. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number. Create a container group definition by calling https://docs.aws.amazon.com/gamelift/latest/apireference/API_CreateContainerGroupDefinition.html. This operation creates a https://docs.aws.amazon.com/gamelift/latest/apireference/API_ContainerGroupDefinition.html resource. /// - tags: A list of labels to assign to the new fleet resource. Tags are developer-defined key-value pairs. Tagging Amazon Web Services resources are useful for resource management, access management and cost allocation. For more information, see Tagging Amazon Web Services Resources in the Amazon Web Services General Reference. /// - logger: Logger use during operation @inlinable @@ -379,9 +379,9 @@ public struct GameLift: AWSService { /// - anywhereConfiguration: Amazon GameLift Anywhere configuration options. /// - buildId: The unique identifier for a custom game server build to be deployed to a fleet with compute type EC2. You can use either the build ID or ARN. The build must be uploaded to Amazon GameLift and in READY status. This fleet property can't be changed after the fleet is created. /// - certificateConfiguration: Prompts Amazon GameLift to generate a TLS/SSL certificate for the fleet. Amazon GameLift uses the certificates to encrypt traffic between game clients and the game servers running on Amazon GameLift. By default, the CertificateConfiguration is DISABLED. You can't change this property after you create the fleet. Certificate Manager (ACM) certificates expire after 13 months. Certificate expiration can cause fleets to fail, preventing players from connecting to instances in the fleet. We recommend you replace fleets before 13 months, consider using fleet aliases for a smooth transition. ACM isn't available in all Amazon Web Services regions. A fleet creation request with certificate generation enabled in an unsupported Region, fails with a 4xx error. For more information about the supported Regions, see Supported Regions in the Certificate Manager User Guide. - /// - computeType: The type of compute resource used to host your game servers. EC2 – The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting. ANYWHERE – Your game server and supporting software is deployed to compute resources that are provided and managed by you.
With this compute type, you can also set the AnywhereConfiguration parameter. + /// - computeType: The type of compute resource used to host your game servers. EC2 – The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting. ANYWHERE – Game servers and supporting software are deployed to compute resources that you provide and manage. With this compute type, you can also set the AnywhereConfiguration parameter. /// - description: A description for the fleet. - /// - ec2InboundPermissions: The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call UpdateFleetPortSettings to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges. + /// - ec2InboundPermissions: The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetPortSettings to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges. /// - ec2InstanceType: The Amazon GameLift-supported Amazon EC2 instance type to use with managed EC2 fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See Amazon Elastic Compute Cloud Instance Types for detailed descriptions of Amazon EC2 instance types. /// - fleetType: Indicates whether to use On-Demand or Spot instances for this fleet. By default, this property is set to ON_DEMAND. Learn more about when to use On-Demand versus Spot Instances. This fleet property can't be changed after the fleet is created. /// - instanceRoleArn: A unique identifier for an IAM role that manages access to your Amazon Web Services services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN by using the IAM dashboard in the Amazon Web Services Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server. This fleet property can't be changed after the fleet is created. @@ -455,7 +455,7 @@ public struct GameLift: AWSService { return try await self.createFleet(input, logger: logger) } - /// Adds remote locations to a managed EC2 fleet or managed container fleet and begins populating the new locations with instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings. You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations. 
To add fleet locations, specify the fleet to be updated and provide a list of one or more locations. If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents. Learn more Setting up fleets Update fleet locations Amazon GameLift service locations for managed hosting. + /// Adds remote locations to an EC2 fleet and begins populating the new locations with instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings. You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations. To add fleet locations, specify the fleet to be updated and provide a list of one or more locations. If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents. Learn more Setting up fleets Update fleet locations Amazon GameLift service locations for managed hosting. @Sendable @inlinable public func createFleetLocations(_ input: CreateFleetLocationsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateFleetLocationsOutput { @@ -468,7 +468,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Adds remote locations to a managed EC2 fleet or managed container fleet and begins populating the new locations with instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings. You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations. To add fleet locations, specify the fleet to be updated and provide a list of one or more locations. If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents. Learn more Setting up fleets Update fleet locations Amazon GameLift service locations for managed hosting. + /// Adds remote locations to an EC2 fleet and begins populating the new locations with instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings. You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations. To add fleet locations, specify the fleet to be updated and provide a list of one or more locations. If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents. Learn more Setting up fleets Update fleet locations Amazon GameLift service locations for managed hosting.
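A short usage sketch for createFleetLocations as documented above; identifiers are placeholders, and the input shape's memberwise initializer is assumed to follow Soto's usual pattern:

```swift
import SotoGameLift

// Assumes an async context; AWSClient() uses the default credential chain.
let client = AWSClient()
let gameLift = GameLift(client: client, region: .useast1)

// Add a remote location to an existing fleet (placeholder fleet ID).
let output = try await gameLift.createFleetLocations(
    .init(fleetId: "fleet-2222bbbb-33cc-44dd-55ee-6666ffff77aa",
          locations: [.init(location: "us-west-2")])
)
// Added locations start in NEW status; poll describeFleetEvents to
// track instance startup in each location.
print(output.locationStates ?? [])
```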
/// /// Parameters: /// - fleetId: A unique identifier for the fleet to add locations to. You can use either the fleet ID or ARN value. @@ -546,7 +546,7 @@ public struct GameLift: AWSService { return try await self.createGameServerGroup(input, logger: logger) } - /// Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift game session placement feature with StartGameSessionPlacement , which uses the FleetIQ algorithm and queues to optimize the placement process. When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The target fleet must be in ACTIVE status. You can use this operation in the following ways: To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration. To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration. To create a game session on an instance in an Anywhere fleet, specify the fleet's custom location. If successful, Amazon GameLift initiates a workflow to start a new game session and returns a GameSession object containing the game session configuration and status. When the game session status is ACTIVE, it is updated with connection information and you can create player sessions for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy. Amazon GameLift retains logs for active for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files. Available in Amazon GameLift Local. Learn more Start a game session All APIs by task + /// Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift game session placement feature with StartGameSessionPlacement, which uses the FleetIQ algorithm and queues to optimize the placement process. When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The target fleet must be in ACTIVE status. You can use this operation in the following ways: To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration. To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration. To create a game session on an instance in an Anywhere fleet, specify the fleet's custom location. If successful, Amazon GameLift initiates a workflow to start a new game session and returns a GameSession object containing the game session configuration and status. When the game session status is ACTIVE, it is updated with connection information and you can create player sessions for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy. 
Amazon GameLift retains logs for active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files. Available in Amazon GameLift Local. Learn more Start a game session All APIs by task @Sendable @inlinable public func createGameSession(_ input: CreateGameSessionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateGameSessionOutput { @@ -559,7 +559,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift game session placement feature with StartGameSessionPlacement , which uses the FleetIQ algorithm and queues to optimize the placement process. When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The target fleet must be in ACTIVE status. You can use this operation in the following ways: To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration. To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration. To create a game session on an instance in an Anywhere fleet, specify the fleet's custom location. If successful, Amazon GameLift initiates a workflow to start a new game session and returns a GameSession object containing the game session configuration and status. When the game session status is ACTIVE, it is updated with connection information and you can create player sessions for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy. Amazon GameLift retains logs for active for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files. Available in Amazon GameLift Local. Learn more Start a game session All APIs by task + /// Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift game session placement feature with StartGameSessionPlacement, which uses the FleetIQ algorithm and queues to optimize the placement process. When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The target fleet must be in ACTIVE status. You can use this operation in the following ways: To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration. To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration. To create a game session on an instance in an Anywhere fleet, specify the fleet's custom location. If successful, Amazon GameLift initiates a workflow to start a new game session and returns a GameSession object containing the game session configuration and status.
When the game session status is ACTIVE, it is updated with connection information and you can create player sessions for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy. Amazon GameLift retains logs for active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files. Available in Amazon GameLift Local. Learn more Start a game session All APIs by task /// /// Parameters: /// - aliasId: A unique identifier for the alias associated with the fleet to create a game session in. You can use either the alias ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both. @@ -863,7 +863,7 @@ public struct GameLift: AWSService { return try await self.createPlayerSessions(input, logger: logger) } - /// Creates a script resource for your Realtime Servers script. Realtime scripts are JavaScript files that provide configuration settings and optional custom game logic for your game. Script logic is executed during an active game session. To deploy Realtime Servers for hosting, create an Amazon GameLift managed fleet with the script. To create a script resource, specify a script name and provide the script file(s). The script files and all dependencies must be combined into a single .zip file. You can upload the .zip file from either of these locations: A locally available directory. Use the ZipFile parameter for this option. An Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the StorageLocation parameter for this option. You'll need to have an Identity Access Management (IAM) role that allows the Amazon GameLift service to access your S3 bucket. If the call is successful, Amazon GameLift creates a new script resource with a unique script ID. The script is uploaded to an Amazon S3 bucket that is owned by Amazon GameLift. Learn more Amazon GameLift Realtime Servers Set Up a Role for Amazon GameLift Access Related actions All APIs by task + /// Creates a new script record for your Realtime Servers script. Realtime scripts are JavaScript files that provide configuration settings and optional custom game logic for your game. The script is deployed when you create a Realtime Servers fleet to host your game sessions. Script logic is executed during an active game session. To create a new script record, specify a script name and provide the script file(s). The script files and all dependencies must be zipped into a single file. You can pull the zip file from either of these locations: A locally available directory. Use the ZipFile parameter for this option. An Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the StorageLocation parameter for this option. You'll need to have an Identity Access Management (IAM) role that allows the Amazon GameLift service to access your S3 bucket. If the call is successful, a new script record is created with a unique script ID. If the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3 bucket and the script record's storage location reflects this location. If the script file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as needed for deployment.
Learn more Amazon GameLift Realtime Servers Set Up a Role for Amazon GameLift Access Related actions All APIs by task @Sendable @inlinable public func createScript(_ input: CreateScriptInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateScriptOutput { @@ -876,7 +876,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Creates a script resource for your Realtime Servers script. Realtime scripts are JavaScript files that provide configuration settings and optional custom game logic for your game. Script logic is executed during an active game session. To deploy Realtime Servers for hosting, create an Amazon GameLift managed fleet with the script. To create a script resource, specify a script name and provide the script file(s). The script files and all dependencies must be combined into a single .zip file. You can upload the .zip file from either of these locations: A locally available directory. Use the ZipFile parameter for this option. An Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the StorageLocation parameter for this option. You'll need to have an Identity Access Management (IAM) role that allows the Amazon GameLift service to access your S3 bucket. If the call is successful, Amazon GameLift creates a new script resource with a unique script ID. The script is uploaded to an Amazon S3 bucket that is owned by Amazon GameLift. Learn more Amazon GameLift Realtime Servers Set Up a Role for Amazon GameLift Access Related actions All APIs by task + /// Creates a new script record for your Realtime Servers script. Realtime scripts are JavaScript files that provide configuration settings and optional custom game logic for your game. The script is deployed when you create a Realtime Servers fleet to host your game sessions. Script logic is executed during an active game session. To create a new script record, specify a script name and provide the script file(s). The script files and all dependencies must be zipped into a single file. You can pull the zip file from either of these locations: A locally available directory. Use the ZipFile parameter for this option. An Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the StorageLocation parameter for this option. You'll need to have an Identity Access Management (IAM) role that allows the Amazon GameLift service to access your S3 bucket. If the call is successful, a new script record is created with a unique script ID. If the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3 bucket and the script record's storage location reflects this location. If the script file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as needed for deployment. Learn more Amazon GameLift Realtime Servers Set Up a Role for Amazon GameLift Access Related actions All APIs by task /// /// Parameters: /// - name: A descriptive label that is associated with a script. Script names do not need to be unique. You can use UpdateScript to change this value later. @@ -1058,7 +1058,7 @@ public struct GameLift: AWSService { return try await self.deleteContainerFleet(input, logger: logger) } - /// Deletes a container group definition. You can delete a container group definition if there are no fleets using the definition. Request options: Delete an entire container group definition, including all versions.
Specify the container group definition name, or use an ARN value without the version number. Delete a particular version. Specify the container group definition name and a version number, or use an ARN value that includes the version number. Keep the newest versions and delete all older versions. Specify the container group definition name and the number of versions to retain. For example, set VersionCountToRetain to 5 to delete all but the five most recent versions. Learn more Manage a container group definition + /// Deletes a container group definition. Request options: Delete an entire container group definition, including all versions. Specify the container group definition name, or use an ARN value without the version number. Delete a particular version. Specify the container group definition name and a version number, or use an ARN value that includes the version number. Keep the newest versions and delete all older versions. Specify the container group definition name and the number of versions to retain. For example, set VersionCountToRetain to 5 to delete all but the five most recent versions. Result If successful, Amazon GameLift removes the container group definition versions that you request deletion for. This request will fail for any requested versions if the following is true: If the version is being used in an active fleet If the version is being deployed to a fleet in a deployment that's currently in progress. If the version is designated as a rollback definition in a fleet deployment that's currently in progress. Learn more Manage a container group definition @Sendable @inlinable public func deleteContainerGroupDefinition(_ input: DeleteContainerGroupDefinitionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteContainerGroupDefinitionOutput { @@ -1071,7 +1071,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Deletes a container group definition. You can delete a container group definition if there are no fleets using the definition. Request options: Delete an entire container group definition, including all versions. Specify the container group definition name, or use an ARN value without the version number. Delete a particular version. Specify the container group definition name and a version number, or use an ARN value that includes the version number. Keep the newest versions and delete all older versions. Specify the container group definition name and the number of versions to retain. For example, set VersionCountToRetain to 5 to delete all but the five most recent versions. Learn more Manage a container group definition + /// Deletes a container group definition. Request options: Delete an entire container group definition, including all versions. Specify the container group definition name, or use an ARN value without the version number. Delete a particular version. Specify the container group definition name and a version number, or use an ARN value that includes the version number. Keep the newest versions and delete all older versions. Specify the container group definition name and the number of versions to retain. For example, set VersionCountToRetain to 5 to delete all but the five most recent versions. Result If successful, Amazon GameLift removes the container group definition versions that you request deletion for. 
This request will fail for any requested versions if the following is true: If the version is being used in an active fleet If the version is being deployed to a fleet in a deployment that's currently in progress. If the version is designated as a rollback definition in a fleet deployment that's currently in progress. Learn more Manage a container group definition /// /// Parameters: /// - name: The unique identifier for the container group definition to delete. You can use either the Name or ARN value. @@ -1549,7 +1549,7 @@ public struct GameLift: AWSService { return try await self.describeBuild(input, logger: logger) } - /// Retrieves properties for a compute resource in an Amazon GameLift fleet. To get a list of all computes in a fleet, call ListCompute. To request information on a specific compute, provide the fleet ID and compute name. If successful, this operation returns details for the requested compute resource. Depending on the fleet's compute type, the result includes the following information: For managed EC2 fleets, this operation returns information about the EC2 instance. For Anywhere fleets, this operation returns information about the registered compute. + /// Retrieves properties for a compute resource in an Amazon GameLift fleet. To get a list of all computes in a fleet, call https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute.html. To request information on a specific compute, provide the fleet ID and compute name. If successful, this operation returns details for the requested compute resource. Depending on the fleet's compute type, the result includes the following information: For managed EC2 fleets, this operation returns information about the EC2 instance. For Anywhere fleets, this operation returns information about the registered compute. @Sendable @inlinable public func describeCompute(_ input: DescribeComputeInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeComputeOutput { @@ -1562,7 +1562,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Retrieves properties for a compute resource in an Amazon GameLift fleet. To get a list of all computes in a fleet, call ListCompute. To request information on a specific compute, provide the fleet ID and compute name. If successful, this operation returns details for the requested compute resource. Depending on the fleet's compute type, the result includes the following information: For managed EC2 fleets, this operation returns information about the EC2 instance. For Anywhere fleets, this operation returns information about the registered compute. + /// Retrieves properties for a compute resource in an Amazon GameLift fleet. To get a list of all computes in a fleet, call https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute.html. To request information on a specific compute, provide the fleet ID and compute name. If successful, this operation returns details for the requested compute resource. Depending on the fleet's compute type, the result includes the following information: For managed EC2 fleets, this operation returns information about the EC2 instance. For Anywhere fleets, this operation returns information about the registered compute. /// /// Parameters: /// - computeName: The unique identifier of the compute resource to retrieve properties for. For an Anywhere fleet compute, use the registered compute name. For an EC2 fleet instance, use the instance ID. 
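// A minimal Soto usage sketch for the describeCompute operation documented above,
// targeting a managed EC2 fleet. The fleet ID and instance ID are placeholder
// values; for an Anywhere fleet you would pass the registered compute name instead.
import SotoGameLift

let client = AWSClient()
let gameLift = GameLift(client: client, region: .useast1)

let output = try await gameLift.describeCompute(
    DescribeComputeInput(
        computeName: "i-0123456789abcdef0",  // EC2 instance ID for a managed fleet
        fleetId: "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff"
    )
)
if let compute = output.compute {
    print("\(compute.computeName ?? "?") is \(compute.computeStatus?.rawValue ?? "unknown")")
}
try await client.shutdown()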
@@ -1709,7 +1709,7 @@ public struct GameLift: AWSService { return try await self.describeFleetAttributes(input, logger: logger) } - /// Retrieves the resource capacity settings for one or more fleets. For a container fleet, this operation also returns counts for game server container groups. With multi-location fleets, this operation retrieves data for the fleet's home Region only. To retrieve capacity for remote locations, see DescribeFleetLocationCapacity. This operation can be used in the following ways: To get capacity data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs. To get capacity data for all fleets, do not provide a fleet identifier. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. Each FleetCapacity object includes a Location property, which is set to the fleet's home Region. Capacity values are returned only for fleets that currently exist. Some API operations may limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed. Learn more Setting up Amazon GameLift fleets GameLift metrics for fleets + /// Retrieves the resource capacity settings for one or more fleets. For a container fleet, this operation also returns counts for game server container groups. With multi-location fleets, this operation retrieves data for the fleet's home Region only. To retrieve capacity for remote locations, see https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html. This operation can be used in the following ways: To get capacity data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs. To get capacity data for all fleets, do not provide a fleet identifier. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. Each FleetCapacity object includes a Location property, which is set to the fleet's home Region. Capacity values are returned only for fleets that currently exist. Some API operations may limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed. Learn more Setting up Amazon GameLift fleets GameLift metrics for fleets @Sendable @inlinable public func describeFleetCapacity(_ input: DescribeFleetCapacityInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeFleetCapacityOutput { @@ -1722,7 +1722,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Retrieves the resource capacity settings for one or more fleets. For a container fleet, this operation also returns counts for game server container groups. With multi-location fleets, this operation retrieves data for the fleet's home Region only. To retrieve capacity for remote locations, see DescribeFleetLocationCapacity. This operation can be used in the following ways: To get capacity data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs. To get capacity data for all fleets, do not provide a fleet identifier. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. 
Each FleetCapacity object includes a Location property, which is set to the fleet's home Region. Capacity values are returned only for fleets that currently exist. Some API operations may limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed. Learn more Setting up Amazon GameLift fleets GameLift metrics for fleets + /// Retrieves the resource capacity settings for one or more fleets. For a container fleet, this operation also returns counts for game server container groups. With multi-location fleets, this operation retrieves data for the fleet's home Region only. To retrieve capacity for remote locations, see https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html. This operation can be used in the following ways: To get capacity data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs. To get capacity data for all fleets, do not provide a fleet identifier. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. Each FleetCapacity object includes a Location property, which is set to the fleet's home Region. Capacity values are returned only for fleets that currently exist. Some API operations may limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed. Learn more Setting up Amazon GameLift fleets GameLift metrics for fleets /// /// Parameters: /// - fleetIds: A unique identifier for the fleet to retrieve capacity information for. You can use either the fleet ID or ARN value. Leave this parameter empty to retrieve capacity information for all fleets. @@ -1919,7 +1919,7 @@ public struct GameLift: AWSService { return try await self.describeFleetLocationUtilization(input, logger: logger) } - /// Retrieves a fleet's inbound connection permissions. Inbound permissions specify IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game server processes that are running in the fleet must use a port that falls within this range. To connect to game server processes on a managed container fleet, the port settings should include one or more of the container fleet's connection ports. Use this operation in the following ways: To retrieve the port settings for a fleet, identify the fleet's unique identifier. To check the status of recent updates to a fleet remote location, specify the fleet ID and a location. Port setting updates can take time to propagate across all locations. If successful, a set of IpPermission objects is returned for the requested fleet ID. When specifying a location, this operation returns a pending status. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets + /// Retrieves a fleet's inbound connection permissions. Connection permissions specify IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game server processes that are running in the fleet must use a port that falls within this range. Use this operation in the following ways: To retrieve the port settings for a fleet, identify the fleet's unique identifier. To check the status of recent updates to a fleet remote location, specify the fleet ID and a location. 
Port setting updates can take time to propagate across all locations. If successful, a set of IpPermission objects is returned for the requested fleet ID. When specifying a location, this operation returns a pending status. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets @Sendable @inlinable public func describeFleetPortSettings(_ input: DescribeFleetPortSettingsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeFleetPortSettingsOutput { @@ -1932,7 +1932,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Retrieves a fleet's inbound connection permissions. Inbound permissions specify IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game server processes that are running in the fleet must use a port that falls within this range. To connect to game server processes on a managed container fleet, the port settings should include one or more of the container fleet's connection ports. Use this operation in the following ways: To retrieve the port settings for a fleet, identify the fleet's unique identifier. To check the status of recent updates to a fleet remote location, specify the fleet ID and a location. Port setting updates can take time to propagate across all locations. If successful, a set of IpPermission objects is returned for the requested fleet ID. When specifying a location, this operation returns a pending status. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets + /// Retrieves a fleet's inbound connection permissions. Connection permissions specify IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game server processes that are running in the fleet must use a port that falls within this range. Use this operation in the following ways: To retrieve the port settings for a fleet, identify the fleet's unique identifier. To check the status of recent updates to a fleet remote location, specify the fleet ID and a location. Port setting updates can take time to propagate across all locations. If successful, a set of IpPermission objects is returned for the requested fleet ID. When specifying a location, this operation returns a pending status. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets /// /// Parameters: /// - fleetId: A unique identifier for the fleet to retrieve port settings for. You can use either the fleet ID or ARN value. @@ -2132,7 +2132,7 @@ public struct GameLift: AWSService { return try await self.describeGameSessionDetails(input, logger: logger) } - /// Retrieves information, including current status, about a game session placement request. To get game session placement details, specify the placement ID. This operation is not designed to be continually called to track game session status. This practice can cause you to exceed your API limit, which results in errors. Instead, you must configure configure an Amazon Simple Notification Service (SNS) topic to receive notifications from FlexMatch or queues. Continuously polling with DescribeGameSessionPlacement should only be used for games in development with low game session usage. + /// Retrieves information, including current status, about a game session placement request. To get game session placement details, specify the placement ID. 
This operation is not designed to be continually called to track game session status. This practice can cause you to exceed your API limit, which results in errors. Instead, you must configure an Amazon Simple Notification Service (SNS) topic to receive notifications from FlexMatch or queues. Continuously polling with DescribeGameSessionPlacement should only be used for games in development with low game session usage. @Sendable @inlinable public func describeGameSessionPlacement(_ input: DescribeGameSessionPlacementInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeGameSessionPlacementOutput { @@ -2145,7 +2145,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Retrieves information, including current status, about a game session placement request. To get game session placement details, specify the placement ID. This operation is not designed to be continually called to track game session status. This practice can cause you to exceed your API limit, which results in errors. Instead, you must configure configure an Amazon Simple Notification Service (SNS) topic to receive notifications from FlexMatch or queues. Continuously polling with DescribeGameSessionPlacement should only be used for games in development with low game session usage. + /// Retrieves information, including current status, about a game session placement request. To get game session placement details, specify the placement ID. This operation is not designed to be continually called to track game session status. This practice can cause you to exceed your API limit, which results in errors. Instead, you must configure an Amazon Simple Notification Service (SNS) topic to receive notifications from FlexMatch or queues. Continuously polling with DescribeGameSessionPlacement should only be used for games in development with low game session usage. /// /// Parameters: /// - placementId: A unique identifier for a game session placement to retrieve. @@ -2243,7 +2243,7 @@ public struct GameLift: AWSService { return try await self.describeGameSessions(input, logger: logger) } - /// Retrieves information about the EC2 instances in an Amazon GameLift managed fleet, including instance ID, connection data, and status. You can use this operation with a multi-location fleet to get location-specific instance information. As an alternative, use the operations ListCompute and DescribeCompute to retrieve information for compute resources, including EC2 and Anywhere fleets. You can call this operation in the following ways: To get information on all instances in a fleet's home Region, specify the fleet ID. To get information on all instances in a fleet's remote location, specify the fleet ID and location name. To get information on a specific instance in a fleet, specify the fleet ID and instance ID. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, this operation returns Instance objects for each requested instance, listed in no particular order. If you call this operation for an Anywhere fleet, you receive an InvalidRequestException. Learn more Remotely connect to fleet instances Debug fleet issues Related actions All APIs by task + /// Retrieves information about the EC2 instances in an Amazon GameLift managed fleet, including instance ID, connection data, and status. You can use this operation with a multi-location fleet to get location-specific instance information. 
As an alternative, use the operations https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute and https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeCompute to retrieve information for compute resources, including EC2 and Anywhere fleets. You can call this operation in the following ways: To get information on all instances in a fleet's home Region, specify the fleet ID. To get information on all instances in a fleet's remote location, specify the fleet ID and location name. To get information on a specific instance in a fleet, specify the fleet ID and instance ID. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, this operation returns Instance objects for each requested instance, listed in no particular order. If you call this operation for an Anywhere fleet, you receive an InvalidRequestException. Learn more Remotely connect to fleet instances Debug fleet issues Related actions All APIs by task @Sendable @inlinable public func describeInstances(_ input: DescribeInstancesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeInstancesOutput { @@ -2256,7 +2256,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Retrieves information about the EC2 instances in an Amazon GameLift managed fleet, including instance ID, connection data, and status. You can use this operation with a multi-location fleet to get location-specific instance information. As an alternative, use the operations ListCompute and DescribeCompute to retrieve information for compute resources, including EC2 and Anywhere fleets. You can call this operation in the following ways: To get information on all instances in a fleet's home Region, specify the fleet ID. To get information on all instances in a fleet's remote location, specify the fleet ID and location name. To get information on a specific instance in a fleet, specify the fleet ID and instance ID. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, this operation returns Instance objects for each requested instance, listed in no particular order. If you call this operation for an Anywhere fleet, you receive an InvalidRequestException. Learn more Remotely connect to fleet instances Debug fleet issues Related actions All APIs by task + /// Retrieves information about the EC2 instances in an Amazon GameLift managed fleet, including instance ID, connection data, and status. You can use this operation with a multi-location fleet to get location-specific instance information. As an alternative, use the operations https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute and https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeCompute to retrieve information for compute resources, including EC2 and Anywhere fleets. You can call this operation in the following ways: To get information on all instances in a fleet's home Region, specify the fleet ID. To get information on all instances in a fleet's remote location, specify the fleet ID and location name. To get information on a specific instance in a fleet, specify the fleet ID and instance ID. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, this operation returns Instance objects for each requested instance, listed in no particular order. If you call this operation for an Anywhere fleet, you receive an InvalidRequestException. 
Learn more Remotely connect to fleet instances Debug fleet issues Related actions All APIs by task /// /// Parameters: /// - fleetId: A unique identifier for the fleet to retrieve instance information for. You can use either the fleet ID or ARN value. @@ -2430,7 +2430,7 @@ public struct GameLift: AWSService { return try await self.describePlayerSessions(input, logger: logger) } - /// Retrieves a fleet's runtime configuration settings. The runtime configuration determines which server processes run, and how they run, and how many run concurrently on computes in managed EC2 and Anywhere fleets. You can update a fleet's runtime configuration at any time using UpdateRuntimeConfiguration. To get the current runtime configuration for a fleet, provide the fleet ID. If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets Running multiple processes on a fleet + /// Retrieves a fleet's runtime configuration settings. The runtime configuration determines which server processes run, and how, on computes in the fleet. For managed EC2 fleets, the runtime configuration describes server processes that run on each fleet instance. You can update a fleet's runtime configuration at any time using UpdateRuntimeConfiguration. To get the current runtime configuration for a fleet, provide the fleet ID. If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets Running multiple processes on a fleet @Sendable @inlinable public func describeRuntimeConfiguration(_ input: DescribeRuntimeConfigurationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeRuntimeConfigurationOutput { @@ -2443,7 +2443,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Retrieves a fleet's runtime configuration settings. The runtime configuration determines which server processes run, and how they run, and how many run concurrently on computes in managed EC2 and Anywhere fleets. You can update a fleet's runtime configuration at any time using UpdateRuntimeConfiguration. To get the current runtime configuration for a fleet, provide the fleet ID. If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets Running multiple processes on a fleet + /// Retrieves a fleet's runtime configuration settings. The runtime configuration determines which server processes run, and how, on computes in the fleet. For managed EC2 fleets, the runtime configuration describes server processes that run on each fleet instance. You can update a fleet's runtime configuration at any time using UpdateRuntimeConfiguration. To get the current runtime configuration for a fleet, provide the fleet ID. If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets Running multiple processes on a fleet /// /// Parameters: /// - fleetId: A unique identifier for the fleet to get the runtime configuration for. You can use either the fleet ID or ARN value. @@ -2600,7 +2600,7 @@ public struct GameLift: AWSService { /// Requests authorization to remotely connect to a hosting resource in an Amazon GameLift managed fleet.
This operation is not used with Amazon GameLift Anywhere fleets. Request options To request access to a compute, specify the compute name and the fleet ID. Results If successful, this operation returns a set of temporary Amazon Web Services credentials, including a two-part access key and a session token. With a managed EC2 fleet (where compute type is EC2), use these credentials with Amazon EC2 Systems Manager (SSM) to start a session with the compute. For more details, see Starting a session (CLI) in the Amazon EC2 Systems Manager User Guide. /// /// Parameters: - /// - computeName: A unique identifier for the compute resource that you want to connect to. For an EC2 fleet compute, use the instance ID. Use ListCompute to retrieve compute identifiers. + /// - computeName: A unique identifier for the compute resource that you want to connect to. For an EC2 fleet compute, use the instance ID. Use https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute.html to retrieve compute identifiers. /// - fleetId: A unique identifier for the fleet that holds the compute resource that you want to connect to. You can use either the fleet ID or ARN value. /// - logger: Logger use during operation @inlinable @@ -2677,7 +2677,7 @@ public struct GameLift: AWSService { return try await self.getGameSessionLogUrl(input, logger: logger) } - /// Requests authorization to remotely connect to an instance in an Amazon GameLift managed fleet. Use this operation to connect to instances with game servers that use Amazon GameLift server SDK 4.x or earlier. To connect to instances with game servers that use server SDK 5.x or later, call GetComputeAccess. To request access to an instance, specify IDs for the instance and the fleet it belongs to. You can retrieve instance IDs for a fleet by calling DescribeInstances with the fleet ID. If successful, this operation returns an IP address and credentials. The returned credentials match the operating system of the instance, as follows: For a Windows instance: returns a user name and secret (password) for use with a Windows Remote Desktop client. For a Linux instance: returns a user name and secret (RSA private key) for use with an SSH client. You must save the secret to a .pem file. If you're using the CLI, see the example Get credentials for a Linux instance for tips on automatically saving the secret to a .pem file. Learn more Remotely connect to fleet instances Debug fleet issues Related actions All APIs by task + /// Requests authorization to remotely connect to an instance in an Amazon GameLift managed fleet. Use this operation to connect to instances with game servers that use Amazon GameLift server SDK 4.x or earlier. To connect to instances with game servers that use server SDK 5.x or later, call https://docs.aws.amazon.com/gamelift/latest/apireference/API_GetComputeAccess. To request access to an instance, specify IDs for the instance and the fleet it belongs to. You can retrieve instance IDs for a fleet by calling DescribeInstances with the fleet ID. If successful, this operation returns an IP address and credentials. The returned credentials match the operating system of the instance, as follows: For a Windows instance: returns a user name and secret (password) for use with a Windows Remote Desktop client. For a Linux instance: returns a user name and secret (RSA private key) for use with an SSH client. You must save the secret to a .pem file. 
If you're using the CLI, see the example Get credentials for a Linux instance for tips on automatically saving the secret to a .pem file. Learn more Remotely connect to fleet instances Debug fleet issues Related actions All APIs by task @Sendable @inlinable public func getInstanceAccess(_ input: GetInstanceAccessInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetInstanceAccessOutput { @@ -2690,7 +2690,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Requests authorization to remotely connect to an instance in an Amazon GameLift managed fleet. Use this operation to connect to instances with game servers that use Amazon GameLift server SDK 4.x or earlier. To connect to instances with game servers that use server SDK 5.x or later, call GetComputeAccess. To request access to an instance, specify IDs for the instance and the fleet it belongs to. You can retrieve instance IDs for a fleet by calling DescribeInstances with the fleet ID. If successful, this operation returns an IP address and credentials. The returned credentials match the operating system of the instance, as follows: For a Windows instance: returns a user name and secret (password) for use with a Windows Remote Desktop client. For a Linux instance: returns a user name and secret (RSA private key) for use with an SSH client. You must save the secret to a .pem file. If you're using the CLI, see the example Get credentials for a Linux instance for tips on automatically saving the secret to a .pem file. Learn more Remotely connect to fleet instances Debug fleet issues Related actions All APIs by task + /// Requests authorization to remotely connect to an instance in an Amazon GameLift managed fleet. Use this operation to connect to instances with game servers that use Amazon GameLift server SDK 4.x or earlier. To connect to instances with game servers that use server SDK 5.x or later, call https://docs.aws.amazon.com/gamelift/latest/apireference/API_GetComputeAccess. To request access to an instance, specify IDs for the instance and the fleet it belongs to. You can retrieve instance IDs for a fleet by calling DescribeInstances with the fleet ID. If successful, this operation returns an IP address and credentials. The returned credentials match the operating system of the instance, as follows: For a Windows instance: returns a user name and secret (password) for use with a Windows Remote Desktop client. For a Linux instance: returns a user name and secret (RSA private key) for use with an SSH client. You must save the secret to a .pem file. If you're using the CLI, see the example Get credentials for a Linux instance for tips on automatically saving the secret to a .pem file. Learn more Remotely connect to fleet instances Debug fleet issues Related actions All APIs by task /// /// Parameters: /// - fleetId: A unique identifier for the fleet that contains the instance you want to access. You can request access to instances in EC2 fleets with the following statuses: ACTIVATING, ACTIVE, or ERROR. Use either a fleet ID or an ARN value. You can access fleets in ERROR status for a short period of time before Amazon GameLift deletes them. @@ -2896,7 +2896,7 @@ public struct GameLift: AWSService { return try await self.listContainerGroupDefinitionVersions(input, logger: logger) } - /// Retrieves container group definitions for the Amazon Web Services account and Amazon Web Services Region. Use the pagination parameters to retrieve results in a set of sequential pages. 
This operation returns only the latest version of each definition. To retrieve all versions of a container group definition, use ListContainerGroupDefinitionVersions. Request options: Retrieve the most recent versions of all container group definitions. Retrieve the most recent versions of all container group definitions, filtered by type. Specify the container group type to filter on. Results: If successful, this operation returns the complete properties of a set of container group definition versions that match the request. This operation returns the list of container group definitions in no particular order. Learn more Manage a container group definition + /// Retrieves container group definitions for the Amazon Web Services account and Amazon Web Services Region. Use the pagination parameters to retrieve results in a set of sequential pages. This operation returns only the latest version of each definition. To retrieve all versions of a container group definition, use ListContainerGroupDefinitionVersions. Request options: Retrieve the most recent versions of all container group definitions. Retrieve the most recent versions of all container group definitions, filtered by type. Specify the container group type to filter on. Results: If successful, this operation returns the complete properties of a set of container group definition versions that match the request. This operation returns the list of container group definitions in no particular order. @Sendable @inlinable public func listContainerGroupDefinitions(_ input: ListContainerGroupDefinitionsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListContainerGroupDefinitionsOutput { @@ -2909,7 +2909,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Retrieves container group definitions for the Amazon Web Services account and Amazon Web Services Region. Use the pagination parameters to retrieve results in a set of sequential pages. This operation returns only the latest version of each definition. To retrieve all versions of a container group definition, use ListContainerGroupDefinitionVersions. Request options: Retrieve the most recent versions of all container group definitions. Retrieve the most recent versions of all container group definitions, filtered by type. Specify the container group type to filter on. Results: If successful, this operation returns the complete properties of a set of container group definition versions that match the request. This operation returns the list of container group definitions in no particular order. Learn more Manage a container group definition + /// Retrieves container group definitions for the Amazon Web Services account and Amazon Web Services Region. Use the pagination parameters to retrieve results in a set of sequential pages. This operation returns only the latest version of each definition. To retrieve all versions of a container group definition, use ListContainerGroupDefinitionVersions. Request options: Retrieve the most recent versions of all container group definitions. Retrieve the most recent versions of all container group definitions, filtered by type. Specify the container group type to filter on. Results: If successful, this operation returns the complete properties of a set of container group definition versions that match the request. This operation returns the list of container group definitions in no particular order. /// /// Parameters: /// - containerGroupType: The type of container group to retrieve. 
Container group type determines how Amazon GameLift deploys the container group on each fleet instance. @@ -2931,7 +2931,7 @@ public struct GameLift: AWSService { return try await self.listContainerGroupDefinitions(input, logger: logger) } - /// Retrieves a collection of container fleet deployments in an Amazon Web Services Region. Request options Get a list of all deployments. Call this operation without specifying a fleet ID. Get a list of all deployments for a fleet. Specify the container fleet ID or ARN value. To get a list of all Realtime Servers fleets with a specific configuration script, provide the script ID. Use the pagination parameters to retrieve results as a set of sequential pages. Results If successful, this operation returns a list of deployments that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve. Fleet IDs are returned in no particular order. + /// Retrieves a collection of container fleet deployments in an Amazon Web Services Region. Use the pagination parameters to retrieve results as a set of sequential pages. Request options Get a list of all deployments. Call this operation without specifying a fleet ID. Get a list of all deployments for a fleet. Specify the container fleet ID or ARN value. Results If successful, this operation returns a list of deployments that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve. Deployments are returned starting with the latest. @Sendable @inlinable public func listFleetDeployments(_ input: ListFleetDeploymentsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListFleetDeploymentsOutput { @@ -2944,7 +2944,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Retrieves a collection of container fleet deployments in an Amazon Web Services Region. Request options Get a list of all deployments. Call this operation without specifying a fleet ID. Get a list of all deployments for a fleet. Specify the container fleet ID or ARN value. To get a list of all Realtime Servers fleets with a specific configuration script, provide the script ID. Use the pagination parameters to retrieve results as a set of sequential pages. Results If successful, this operation returns a list of deployments that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve. Fleet IDs are returned in no particular order. + /// Retrieves a collection of container fleet deployments in an Amazon Web Services Region. Use the pagination parameters to retrieve results as a set of sequential pages. Request options Get a list of all deployments. Call this operation without specifying a fleet ID. Get a list of all deployments for a fleet. Specify the container fleet ID or ARN value. Results If successful, this operation returns a list of deployments that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve. Deployments are returned starting with the latest. /// /// Parameters: /// - fleetId: A unique identifier for the container fleet. You can use either the fleet ID or ARN value. @@ -3401,7 +3401,7 @@ public struct GameLift: AWSService { return try await self.resumeGameServerGroup(input, logger: logger) } - /// Retrieves all active game sessions that match a set of search criteria and sorts them into a specified order. 
This operation is not designed to continually track game session status because that practice can cause you to exceed your API limit and generate errors. Instead, configure an Amazon Simple Notification Service (Amazon SNS) topic to receive notifications from a matchmaker or a game session placement queue. When searching for game sessions, you specify exactly where you want to search and provide a search filter expression, a sort expression, or both. A search request can search only one fleet, but it can search all of a fleet's locations. This operation can be used in the following ways: To search all game sessions that are currently running on all locations in a fleet, provide a fleet or alias ID. This approach returns game sessions in the fleet's home Region and all remote locations that fit the search criteria. To search all game sessions that are currently running on a specific fleet location, provide a fleet or alias ID and a location name. For location, you can specify a fleet's home Region or any remote location. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSession object is returned for each game session that matches the request. Search finds game sessions that are in ACTIVE status only. To retrieve information on game sessions in other statuses, use DescribeGameSessions . To set search and sort criteria, create a filter expression using the following game session attributes. For game session search examples, see the Examples section of this topic. gameSessionId -- A unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value. gameSessionName -- Name assigned to a game session. Game session names do not need to be unique to a game session. gameSessionProperties -- A set of key-value pairs that can store custom data in a game session. For example: {"Key": "difficulty", "Value": "novice"}. The filter expression must specify the GameProperty -- a Key and a string Value to search for the game sessions. For example, to search for the above key-value pair, specify the following search filter: gameSessionProperties.difficulty = "novice". All game property values are searched as strings. For examples of searching game sessions, see the ones below, and also see Search game sessions by game property. maximumSessions -- Maximum number of player sessions allowed for a game session. creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds. playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out. hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join. Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join. All APIs by task + /// Retrieves all active game sessions that match a set of search criteria and sorts them into a specified order. This operation is not designed to continually track game session status because that practice can cause you to exceed your API limit and generate errors. 
Instead, configure an Amazon Simple Notification Service (Amazon SNS) topic to receive notifications from a matchmaker or a game session placement queue. When searching for game sessions, you specify exactly where you want to search and provide a search filter expression, a sort expression, or both. A search request can search only one fleet, but it can search all of a fleet's locations. This operation can be used in the following ways: To search all game sessions that are currently running on all locations in a fleet, provide a fleet or alias ID. This approach returns game sessions in the fleet's home Region and all remote locations that fit the search criteria. To search all game sessions that are currently running on a specific fleet location, provide a fleet or alias ID and a location name. For location, you can specify a fleet's home Region or any remote location. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSession object is returned for each game session that matches the request. Search finds game sessions that are in ACTIVE status only. To retrieve information on game sessions in other statuses, use DescribeGameSessions. To set search and sort criteria, create a filter expression using the following game session attributes. For game session search examples, see the Examples section of this topic. gameSessionId -- A unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value. gameSessionName -- Name assigned to a game session. Game session names do not need to be unique to a game session. gameSessionProperties -- A set of key-value pairs that can store custom data in a game session. For example: {"Key": "difficulty", "Value": "novice"}. The filter expression must specify the https://docs.aws.amazon.com/gamelift/latest/apireference/API_GameProperty -- a Key and a string Value to search for the game sessions. For example, to search for the above key-value pair, specify the following search filter: gameSessionProperties.difficulty = "novice". All game property values are searched as strings. For examples of searching game sessions, see the ones below, and also see Search game sessions by game property. maximumSessions -- Maximum number of player sessions allowed for a game session. creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds. playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out. hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join. Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join. All APIs by task @Sendable @inlinable public func searchGameSessions(_ input: SearchGameSessionsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchGameSessionsOutput { @@ -3414,7 +3414,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Retrieves all active game sessions that match a set of search criteria and sorts them into a specified order. 
This operation is not designed to continually track game session status because that practice can cause you to exceed your API limit and generate errors. Instead, configure an Amazon Simple Notification Service (Amazon SNS) topic to receive notifications from a matchmaker or a game session placement queue. When searching for game sessions, you specify exactly where you want to search and provide a search filter expression, a sort expression, or both. A search request can search only one fleet, but it can search all of a fleet's locations. This operation can be used in the following ways: To search all game sessions that are currently running on all locations in a fleet, provide a fleet or alias ID. This approach returns game sessions in the fleet's home Region and all remote locations that fit the search criteria. To search all game sessions that are currently running on a specific fleet location, provide a fleet or alias ID and a location name. For location, you can specify a fleet's home Region or any remote location. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSession object is returned for each game session that matches the request. Search finds game sessions that are in ACTIVE status only. To retrieve information on game sessions in other statuses, use DescribeGameSessions . To set search and sort criteria, create a filter expression using the following game session attributes. For game session search examples, see the Examples section of this topic. gameSessionId -- A unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value. gameSessionName -- Name assigned to a game session. Game session names do not need to be unique to a game session. gameSessionProperties -- A set of key-value pairs that can store custom data in a game session. For example: {"Key": "difficulty", "Value": "novice"}. The filter expression must specify the GameProperty -- a Key and a string Value to search for the game sessions. For example, to search for the above key-value pair, specify the following search filter: gameSessionProperties.difficulty = "novice". All game property values are searched as strings. For examples of searching game sessions, see the ones below, and also see Search game sessions by game property. maximumSessions -- Maximum number of player sessions allowed for a game session. creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds. playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out. hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join. Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join. All APIs by task + /// Retrieves all active game sessions that match a set of search criteria and sorts them into a specified order. This operation is not designed to continually track game session status because that practice can cause you to exceed your API limit and generate errors. 
Instead, configure an Amazon Simple Notification Service (Amazon SNS) topic to receive notifications from a matchmaker or a game session placement queue. When searching for game sessions, you specify exactly where you want to search and provide a search filter expression, a sort expression, or both. A search request can search only one fleet, but it can search all of a fleet's locations. This operation can be used in the following ways: To search all game sessions that are currently running on all locations in a fleet, provide a fleet or alias ID. This approach returns game sessions in the fleet's home Region and all remote locations that fit the search criteria. To search all game sessions that are currently running on a specific fleet location, provide a fleet or alias ID and a location name. For location, you can specify a fleet's home Region or any remote location. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, a GameSession object is returned for each game session that matches the request. Search finds game sessions that are in ACTIVE status only. To retrieve information on game sessions in other statuses, use DescribeGameSessions. To set search and sort criteria, create a filter expression using the following game session attributes. For game session search examples, see the Examples section of this topic. gameSessionId -- A unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value. gameSessionName -- Name assigned to a game session. Game session names do not need to be unique to a game session. gameSessionProperties -- A set of key-value pairs that can store custom data in a game session. For example: {"Key": "difficulty", "Value": "novice"}. The filter expression must specify the https://docs.aws.amazon.com/gamelift/latest/apireference/API_GameProperty -- a Key and a string Value to search for the game sessions. For example, to search for the above key-value pair, specify the following search filter: gameSessionProperties.difficulty = "novice". All game property values are searched as strings. For examples of searching game sessions, see the ones below, and also see Search game sessions by game property. maximumSessions -- Maximum number of player sessions allowed for a game session. creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds. playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out. hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join. Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join. All APIs by task /// /// Parameters: /// - aliasId: A unique identifier for the alias associated with the fleet to search for active game sessions. You can use either the alias ID or ARN value. Each request must reference either a fleet ID or alias ID, but not both. 
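// A minimal Soto sketch of the search described above: return joinable ACTIVE game
// sessions on a single fleet, emptiest first. The fleet ID is a placeholder, and the
// filter includes the hasAvailablePlayerSessions attribute the documentation
// recommends for every search request.
import SotoGameLift

let client = AWSClient()
let gameLift = GameLift(client: client, region: .useast1)

let search = try await gameLift.searchGameSessions(
    SearchGameSessionsInput(
        filterExpression: "hasAvailablePlayerSessions=true",
        fleetId: "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
        limit: 20,
        sortExpression: "playerSessionCount ASC"  // emptiest sessions first
    )
)
for session in search.gameSessions ?? [] {
    print(session.gameSessionId ?? "?", session.currentPlayerSessionCount ?? 0)
}
try await client.shutdown()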
@@ -3483,7 +3483,7 @@ public struct GameLift: AWSService { return try await self.startFleetActions(input, logger: logger) } - /// Places a request for a new game session in a queue. When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out. A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request. When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order. Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant Regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the Region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a Region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each Region's average lag for all players and reorders to get the best game play across all players. To place a new game session request, specify the following: The queue name and a set of game session properties and settings A unique ID (such as a UUID) for the placement. You use this ID to track the status of the placement request (Optional) A set of player data and a unique player ID for each player that you are joining to the new game session (player data is optional, but if you include it, you must also provide a unique ID for each player) Latency data for all players (if you want to optimize game play for the players) If successful, a new game session placement is created. To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and Region are referenced. If the placement request times out, submit a new request to the same queue or a different queue. + /// Makes a request to start a new game session using a game session queue. When processing a placement request in a queue, Amazon GameLift finds the best possible available resource to host the game session and prompts the resource to start the game session. Request options Call this API with the following minimum parameters: GameSessionQueueName, MaximumPlayerSessionCount, and PlacementID. You can also include game session data (data formatted as strings) or game properties (data formatted as key-value pairs) to pass to the new game session. You can change how Amazon GameLift chooses a hosting resource for the new game session. Prioritizing resources for game session placements is defined when you configure a game session queue. You can use the default prioritization process or specify a custom process by providing a PriorityConfiguration when you create or update a queue. Prioritize based on resource cost and location, using the queue's configured priority settings. Call this API with the minimum parameters. Prioritize based on latency. Include a set of values for PlayerLatencies. You can provide latency data with or without player session data. This option instructs Amazon GameLift to reorder the queue's prioritized locations list based on the latency data. 
If latency data is provided for multiple players, Amazon GameLift calculates each location's average latency for all players and reorders to find the lowest latency across all players. Don't include latency data if you're providing a custom list of locations. Prioritize based on a custom list of locations. If you're using a queue that's configured to prioritize location first (see PriorityConfiguration for game session queues), use the PriorityConfigurationOverride parameter to substitute a different location list for this placement request. When prioritizing placements by location, Amazon GameLift searches each location in prioritized order to find an available hosting resource for the new game session. You can choose whether to use the override list for the first placement attempt only or for all attempts. You can request new player sessions for a group of players. Include the DesiredPlayerSessions parameter and include at minimum a unique player ID for each. You can also include player-specific data to pass to the new game session. Result If successful, this request generates a new game session placement request and adds it to the game session queue for Amazon GameLift to process in turn. You can track the status of individual placement requests by calling DescribeGameSessionPlacement. A new game session is running if the status is FULFILLED and the request returns the game session connection information (IP address and port). If you include player session data, Amazon GameLift creates a player session for each player ID in the request. The request results in a BadRequestException in the following situations: If the request includes both PlayerLatencies and PriorityConfigurationOverride parameters. If the request includes the PriorityConfigurationOverride parameter and designates a queue that doesn't prioritize locations. Amazon GameLift continues to retry each placement request until it reaches the queue's timeout setting. If a request times out, you can resubmit the request to the same queue or try a different queue. @Sendable @inlinable public func startGameSessionPlacement(_ input: StartGameSessionPlacementInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartGameSessionPlacementOutput { @@ -3496,7 +3496,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Places a request for a new game session in a queue. When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out. A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request. When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order. Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant Regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the Region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a Region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each Region's average lag for all players and reorders to get the best game play across all players.
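A hedged sketch of the minimum placement request described above, with optional per-player latency hints. The queue name, player ID, and latency figures are hypothetical, and the parameter labels follow the generated overload shown in this hunk.

import Foundation
import SotoGameLift

let gameLift = GameLift(client: AWSClient(), region: .useast1)

// Minimum parameters plus latency data; Amazon GameLift reorders the queue's
// prioritized locations to minimize average latency across all players.
let placement = try await gameLift.startGameSessionPlacement(
    gameSessionQueueName: "my-session-queue",  // hypothetical queue name
    maximumPlayerSessionCount: 10,
    placementId: UUID().uuidString,            // developer-defined, must be unique
    playerLatencies: [
        .init(latencyInMilliseconds: 40, playerId: "player-1", regionIdentifier: "us-east-1"),
        .init(latencyInMilliseconds: 90, playerId: "player-1", regionIdentifier: "eu-west-1"),
    ]
)
print(placement.gameSessionPlacement?.status?.rawValue ?? "PENDING")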
To place a new game session request, specify the following: The queue name and a set of game session properties and settings A unique ID (such as a UUID) for the placement. You use this ID to track the status of the placement request (Optional) A set of player data and a unique player ID for each player that you are joining to the new game session (player data is optional, but if you include it, you must also provide a unique ID for each player) Latency data for all players (if you want to optimize game play for the players) If successful, a new game session placement is created. To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and Region are referenced. If the placement request times out, submit a new request to the same queue or a different queue. + /// Makes a request to start a new game session using a game session queue. When processing a placement request in a queue, Amazon GameLift finds the best possible available resource to host the game session and prompts the resource to start the game session. Request options Call this API with the following minimum parameters: GameSessionQueueName, MaximumPlayerSessionCount, and PlacementID. You can also include game session data (data formatted as strings) or game properties (data formatted as key-value pairs) to pass to the new game session. You can change how Amazon GameLift chooses a hosting resource for the new game session. Prioritizing resources for game session placements is defined when you configure a game session queue. You can use the default prioritization process or specify a custom process by providing a PriorityConfiguration when you create or update a queue. Prioritize based on resource cost and location, using the queue's configured priority settings. Call this API with the minimum parameters. Prioritize based on latency. Include a set of values for PlayerLatencies. You can provide latency data with or without player session data. This option instructs Amazon GameLift to reorder the queue's prioritized locations list based on the latency data. If latency data is provided for multiple players, Amazon GameLift calculates each location's average latency for all players and reorders to find the lowest latency across all players. Don't include latency data if you're providing a custom list of locations. Prioritize based on a custom list of locations. If you're using a queue that's configured to prioritize location first (see PriorityConfiguration for game session queues), use the PriorityConfigurationOverride parameter to substitute a different location list for this placement request. When prioritizing placements by location, Amazon GameLift searches each location in prioritized order to find an available hosting resource for the new game session. You can choose whether to use the override list for the first placement attempt only or for all attempts. You can request new player sessions for a group of players. Include the DesiredPlayerSessions parameter and include at minimum a unique player ID for each. You can also include player-specific data to pass to the new game session. Result If successful, this request generates a new game session placement request and adds it to the game session queue for Amazon GameLift to process in turn. You can track the status of individual placement requests by calling DescribeGameSessionPlacement. 
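For the custom-location option and the status tracking just described, a sketch that substitutes a location list and then polls the placement. It assumes the PriorityConfigurationOverride shape and PlacementFallbackStrategy enum added in the shapes diff further down; the queue name and locations are hypothetical. Note that PlayerLatencies is deliberately omitted, since combining it with an override raises a BadRequestException.

import Foundation
import SotoGameLift

let gameLift = GameLift(client: AWSClient(), region: .useast1)

let placementId = UUID().uuidString
_ = try await gameLift.startGameSessionPlacement(
    gameSessionQueueName: "location-priority-queue",  // hypothetical; must prioritize location first
    maximumPlayerSessionCount: 8,
    placementId: placementId,
    priorityConfigurationOverride: .init(
        locationOrder: ["us-west-2", "us-east-1"],    // hypothetical override order
        placementFallbackStrategy: .defaultAfterSinglePass
    )
)
// Poll until FULFILLED, which means connection info (IP address and port) is available.
let status = try await gameLift.describeGameSessionPlacement(placementId: placementId)
print(status.gameSessionPlacement?.status?.rawValue ?? "PENDING")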
A new game session is running if the status is FULFILLED and the request returns the game session connection information (IP address and port). If you include player session data, Amazon GameLift creates a player session for each player ID in the request. The request results in a BadRequestException in the following situations: If the request includes both PlayerLatencies and PriorityConfigurationOverride parameters. If the request includes the PriorityConfigurationOverride parameter and designates a queue that doesn't prioritize locations. Amazon GameLift continues to retry each placement request until it reaches the queue's timeout setting. If a request times out, you can resubmit the request to the same queue or try a different queue. /// /// Parameters: /// - desiredPlayerSessions: Set of information on each player to create a player session for. @@ -3506,7 +3506,7 @@ public struct GameLift: AWSService { /// - gameSessionQueueName: Name of the queue to use to place the new game session. You can use either the queue name or ARN value. /// - maximumPlayerSessionCount: The maximum number of players that can be connected simultaneously to the game session. /// - placementId: A unique identifier to assign to the new game session placement. This value is developer-defined. The value must be unique across all Regions and cannot be reused. - /// - playerLatencies: A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to @aws; Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players. + /// - playerLatencies: A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players. + /// - priorityConfigurationOverride: A prioritized list of locations to use for the game session placement and instructions on how to use it. This list overrides a queue's prioritized location list for this game session placement request only. You can include Amazon Web Services Regions, local zones, and custom locations (for Anywhere fleets). Choose a fallback strategy to instruct Amazon GameLift to use the override list for the first placement attempt only or for all placement attempts. /// - logger: Logger use during operation @inlinable public func startGameSessionPlacement( @@ -3518,6 +3519,7 @@ public struct GameLift: AWSService { maximumPlayerSessionCount: Int? = nil, placementId: String? = nil, playerLatencies: [PlayerLatency]? = nil, + priorityConfigurationOverride: PriorityConfigurationOverride? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> StartGameSessionPlacementOutput { let input = StartGameSessionPlacementInput( @@ -3528,7 +3530,8 @@ public struct GameLift: AWSService { gameSessionQueueName: gameSessionQueueName, maximumPlayerSessionCount: maximumPlayerSessionCount, placementId: placementId, - playerLatencies: playerLatencies + playerLatencies: playerLatencies, + priorityConfigurationOverride: priorityConfigurationOverride ) return try await self.startGameSessionPlacement(input, logger: logger) } @@ -3763,6 +3766,38 @@ public struct GameLift: AWSService { return try await self.tagResource(input, logger: logger) } + /// Ends a game session that's currently in progress.
Use this action to terminate any game session that isn't in ERROR status. Terminating a game session is the most efficient way to free up a server process when it's hosting a game session that's in a bad state or not ending properly. You can use this action to terminate a game session that's being hosted on any type of Amazon GameLift fleet compute, including computes for managed EC2, managed container, and Anywhere fleets. The game server must be integrated with Amazon GameLift server SDK 5.x or greater. Request options Request termination for a single game session. Provide the game session ID and the termination mode. There are two potential methods for terminating a game session: Initiate a graceful termination using the normal game session shutdown sequence. With this mode, the Amazon GameLift service prompts the server process that's hosting the game session by calling the server SDK callback method OnProcessTerminate(). The callback implementation is part of the custom game server code. It might involve a variety of actions to gracefully end a game session, such as notifying players, before stopping the server process. Force an immediate game session termination. With this mode, the Amazon GameLift service takes action to stop the server process, which ends the game session without the normal game session shutdown sequence. Results If successful, game session termination is initiated. During this activity, the game session status is changed to TERMINATING. When completed, the server process that was hosting the game session has been stopped and replaced with a new server process that's ready to host a new game session. The old game session's status is changed to TERMINATED with a status reason that indicates the termination method used. Learn more Add Amazon GameLift to your game server Amazon GameLift server SDK 5 reference guide for OnProcessTerminate() (C++) (C#) (Unreal) (Go) + @Sendable + @inlinable + public func terminateGameSession(_ input: TerminateGameSessionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> TerminateGameSessionOutput { + try await self.client.execute( + operation: "TerminateGameSession", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Ends a game session that's currently in progress. Use this action to terminate any game session that isn't in ERROR status. Terminating a game session is the most efficient way to free up a server process when it's hosting a game session that's in a bad state or not ending properly. You can use this action to terminate a game session that's being hosted on any type of Amazon GameLift fleet compute, including computes for managed EC2, managed container, and Anywhere fleets. The game server must be integrated with Amazon GameLift server SDK 5.x or greater. Request options Request termination for a single game session. Provide the game session ID and the termination mode. There are two potential methods for terminating a game session: Initiate a graceful termination using the normal game session shutdown sequence. With this mode, the Amazon GameLift service prompts the server process that's hosting the game session by calling the server SDK callback method OnProcessTerminate(). The callback implementation is part of the custom game server code. It might involve a variety of actions to gracefully end a game session, such as notifying players, before stopping the server process. Force an immediate game session termination. 
With this mode, the Amazon GameLift service takes action to stop the server process, which ends the game session without the normal game session shutdown sequence. Results If successful, game session termination is initiated. During this activity, the game session status is changed to TERMINATING. When completed, the server process that was hosting the game session has been stopped and replaced with a new server process that's ready to host a new game session. The old game session's status is changed to TERMINATED with a status reason that indicates the termination method used. Learn more Add Amazon GameLift to your game server Amazon GameLift server SDK 5 reference guide for OnProcessTerminate() (C++) (C#) (Unreal) (Go) + /// + /// Parameters: + /// - gameSessionId: A unique identifier for the game session to be terminated. A game session ARN has the following format: arn:aws:gamelift:::gamesession//. + /// - terminationMode: The method to use to terminate the game session. Available methods include: TRIGGER_ON_PROCESS_TERMINATE – Prompts the Amazon GameLift service to send an OnProcessTerminate() callback to the server process and initiate the normal game session shutdown sequence. The OnProcessTerminate method, which is implemented in the game server code, must include a call to the server SDK action ProcessEnding(), which is how the server process signals to Amazon GameLift that a game session is ending. If the server process doesn't call ProcessEnding(), the game session termination won't conclude successfully. FORCE_TERMINATE – Prompts the Amazon GameLift service to stop the server process immediately. Amazon GameLift takes action (depending on the type of fleet) to shut down the server process without the normal game session shutdown sequence. This method is not available for game sessions that are running on Anywhere fleets unless the fleet is deployed with the Amazon GameLift Agent. In this scenario, a force terminate request results in an invalid or bad request exception. + /// - logger: Logger use during operation + @inlinable + public func terminateGameSession( + gameSessionId: String? = nil, + terminationMode: TerminationMode? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> TerminateGameSessionOutput { + let input = TerminateGameSessionInput( + gameSessionId: gameSessionId, + terminationMode: terminationMode + ) + return try await self.terminateGameSession(input, logger: logger) + } + /// Removes a tag assigned to an Amazon GameLift resource. You can use resource tags to organize Amazon Web Services resources for a range of purposes. This operation handles the permissions necessary to manage tags for Amazon GameLift resources that support tagging. To remove a tag from a resource, specify the unique ARN value for the resource and provide a string list containing one or more tags to remove. This operation succeeds even if the list includes tags that aren't assigned to the resource. Learn more Tagging Amazon Web Services Resources in the Amazon Web Services General Reference Amazon Web Services Tagging Strategies Related actions All APIs by task @Sendable @inlinable @@ -3868,7 +3903,7 @@ return try await self.updateBuild(input, logger: logger) } - /// Updates the properties of a managed container fleet. Depending on the properties being updated, this operation might initiate a fleet deployment. You can track deployments for a fleet using DescribeFleetDeployment.
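A hedged sketch of the new TerminateGameSession operation above, using the parameter-based overload shown in this hunk and the TerminationMode enum added in the shapes diff below. The game session ARN is hypothetical, and the assumption that the output carries the updated game session follows the Results description.

import SotoGameLift

let gameLift = GameLift(client: AWSClient(), region: .useast1)

// Graceful shutdown: the server process receives OnProcessTerminate() and is
// expected to call ProcessEnding(); use .forceTerminate to skip the sequence.
let output = try await gameLift.terminateGameSession(
    gameSessionId: "arn:aws:gamelift:us-east-1::gamesession/fleet-1234abcd/session-1",  // hypothetical ARN
    terminationMode: .triggerOnProcessTerminate
)
// Per the docs above, the session moves to TERMINATING while shutdown proceeds.
print(output.gameSession?.status?.rawValue ?? "TERMINATING")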
Request options As with CreateContainerFleet, many fleet properties use common defaults or are calculated based on the fleet's container group definitions. Update fleet properties that result in a fleet deployment. Include only those properties that you want to change. Specify deployment configuration settings. Update fleet properties that don't result in a fleet deployment. Include only those properties that you want to change. Changes to the following properties initiate a fleet deployment: GameServerContainerGroupDefinition PerInstanceContainerGroupDefinition GameServerContainerGroupsPerInstance InstanceInboundPermissions InstanceConnectionPortRange LogConfiguration Results If successful, this operation updates the container fleet resource, and might initiate a new deployment of fleet resources using the deployment configuration provided. A deployment replaces existing fleet instances with new instances that are deployed with the updated fleet properties. The fleet is placed in UPDATING status until the deployment is complete, then return to ACTIVE. You can have only one update deployment active at a time for a fleet. If a second update request initiates a deployment while another deployment is in progress, the first deployment is cancelled. + /// Updates the properties of a managed container fleet. Depending on the properties being updated, this operation might initiate a fleet deployment. You can track deployments for a fleet using https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetDeployment.html. Request options As with CreateContainerFleet, many fleet properties use common defaults or are calculated based on the fleet's container group definitions. Update fleet properties that result in a fleet deployment. Include only those properties that you want to change. Specify deployment configuration settings. Update fleet properties that don't result in a fleet deployment. Include only those properties that you want to change. Changes to the following properties initiate a fleet deployment: GameServerContainerGroupDefinition PerInstanceContainerGroupDefinition GameServerContainerGroupsPerInstance InstanceInboundPermissions InstanceConnectionPortRange LogConfiguration Results If successful, this operation updates the container fleet resource, and might initiate a new deployment of fleet resources using the deployment configuration provided. A deployment replaces existing fleet instances with new instances that are deployed with the updated fleet properties. The fleet is placed in UPDATING status until the deployment is complete, then returns to ACTIVE. You can have only one update deployment active at a time for a fleet. If a second update request initiates a deployment while another deployment is in progress, the first deployment is cancelled. @Sendable @inlinable public func updateContainerFleet(_ input: UpdateContainerFleetInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateContainerFleetOutput { @@ -3881,13 +3916,13 @@ logger: logger ) } - /// Updates the properties of a managed container fleet. Depending on the properties being updated, this operation might initiate a fleet deployment. You can track deployments for a fleet using DescribeFleetDeployment. Request options As with CreateContainerFleet, many fleet properties use common defaults or are calculated based on the fleet's container group definitions. Update fleet properties that result in a fleet deployment.
Include only those properties that you want to change. Specify deployment configuration settings. Update fleet properties that don't result in a fleet deployment. Include only those properties that you want to change. Changes to the following properties initiate a fleet deployment: GameServerContainerGroupDefinition PerInstanceContainerGroupDefinition GameServerContainerGroupsPerInstance InstanceInboundPermissions InstanceConnectionPortRange LogConfiguration Results If successful, this operation updates the container fleet resource, and might initiate a new deployment of fleet resources using the deployment configuration provided. A deployment replaces existing fleet instances with new instances that are deployed with the updated fleet properties. The fleet is placed in UPDATING status until the deployment is complete, then return to ACTIVE. You can have only one update deployment active at a time for a fleet. If a second update request initiates a deployment while another deployment is in progress, the first deployment is cancelled. + /// Updates the properties of a managed container fleet. Depending on the properties being updated, this operation might initiate a fleet deployment. You can track deployments for a fleet using https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetDeployment.html. Request options As with CreateContainerFleet, many fleet properties use common defaults or are calculated based on the fleet's container group definitions. Update fleet properties that result in a fleet deployment. Include only those properties that you want to change. Specify deployment configuration settings. Update fleet properties that don't result in a fleet deployment. Include only those properties that you want to change. Changes to the following properties initiate a fleet deployment: GameServerContainerGroupDefinition PerInstanceContainerGroupDefinition GameServerContainerGroupsPerInstance InstanceInboundPermissions InstanceConnectionPortRange LogConfiguration Results If successful, this operation updates the container fleet resource, and might initiate a new deployment of fleet resources using the deployment configuration provided. A deployment replaces existing fleet instances with new instances that are deployed with the updated fleet properties. The fleet is placed in UPDATING status until the deployment is complete, then returns to ACTIVE. You can have only one update deployment active at a time for a fleet. If a second update request initiates a deployment while another deployment is in progress, the first deployment is cancelled. /// /// Parameters: /// - deploymentConfiguration: Instructions for how to deploy updates to a container fleet, if the fleet update initiates a deployment. The deployment configuration lets you determine how to replace fleet instances and what actions to take if the deployment fails. /// - description: A meaningful description of the container fleet. /// - fleetId: A unique identifier for the container fleet to update. You can use either the fleet ID or ARN value. - /// - gameServerContainerGroupDefinitionName: The name or ARN value of a new game server container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value.
You can't remove a fleet's game server container group definition, you can only update or replace it with another definition. Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version. + /// - gameServerContainerGroupDefinitionName: The name or ARN value of a new game server container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value. You can't remove a fleet's game server container group definition, you can only update or replace it with another definition. Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version. /// - gameServerContainerGroupsPerInstance: The number of times to replicate the game server container group on each fleet instance. By default, Amazon GameLift calculates the maximum number of game server container groups that can fit on each instance. You can remove this property value to use the calculated value, or set it manually. If you set this number manually, Amazon GameLift uses your value as long as it's less than the calculated maximum. /// - gameSessionCreationLimitPolicy: A policy that limits the number of game sessions that each individual player can create on instances in this fleet. The limit applies for a specified span of time. /// - instanceConnectionPortRange: A revised set of port numbers to open on each fleet instance. By default, Amazon GameLift calculates an optimal port range based on your fleet configuration. If you previously set this parameter manually, you can't reset this to use the calculated settings. @@ -3896,7 +3931,7 @@ public struct GameLift: AWSService { /// - logConfiguration: The method for collecting container logs for the fleet. /// - metricGroups: The name of an Amazon Web Services CloudWatch metric group to add this fleet to. /// - newGameSessionProtectionPolicy: The game session protection policy to apply to all new game sessions that are started in this fleet. Game sessions that already exist are not affected. - /// - perInstanceContainerGroupDefinitionName: The name or ARN value of a new per-instance container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value. Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version. To remove a fleet's per-instance container group definition, leave this parameter empty and use the parameter RemoveAttributes. + /// - perInstanceContainerGroupDefinitionName: The name or ARN value of a new per-instance container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value. Update a container group definition by calling UpdateContainerGroupDefinition. 
This operation creates a ContainerGroupDefinition resource with an incremented version. To remove a fleet's per-instance container group definition, leave this parameter empty and use the parameter RemoveAttributes. /// - removeAttributes: If set, this update removes a fleet's per-instance container group definition. You can't remove a fleet's game server container group definition. /// - logger: Logger use during operation @inlinable @@ -4007,7 +4042,7 @@ public struct GameLift: AWSService { /// - fleetId: A unique identifier for the fleet to update attribute metadata for. You can use either the fleet ID or ARN value. /// - metricGroups: The name of a metric group to add this fleet to. Use a metric group in Amazon CloudWatch to aggregate the metrics from multiple fleets. Provide an existing metric group name, or create a new metric group by providing a new name. A fleet can only be in one metric group at a time. /// - name: A descriptive label that is associated with a fleet. Fleet names do not need to be unique. - /// - newGameSessionProtectionPolicy: The game session protection policy to apply to all new game sessions created in this fleet. Game sessions that already exist are not affected. You can set protection for individual game sessions using UpdateGameSession. NoProtection -- The game session can be terminated during a scale-down event. FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event. + /// - newGameSessionProtectionPolicy: The game session protection policy to apply to all new game sessions created in this fleet. Game sessions that already exist are not affected. You can set protection for individual game sessions using UpdateGameSession. NoProtection -- The game session can be terminated during a scale-down event. FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event. /// - resourceCreationLimitPolicy: Policy settings that limit the number of game sessions an individual player can create over a span of time. /// - logger: Logger use during operation @inlinable @@ -4074,7 +4109,7 @@ public struct GameLift: AWSService { return try await self.updateFleetCapacity(input, logger: logger) } - /// Updates permissions that allow inbound traffic to connect to game sessions in the fleet. To update settings, specify the fleet ID to be updated and specify the changes to be made. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. For a container fleet, inbound permissions must specify port numbers that are defined in the fleet's connection port settings. If successful, the fleet ID for the updated fleet is returned. For fleets with remote locations, port setting updates can take time to propagate across all locations. You can check the status of updates in each location by calling DescribeFleetPortSettings with a location name. Learn more Setting up Amazon GameLift fleets + /// Updates permissions that allow inbound traffic to connect to game sessions in the fleet. To update settings, specify the fleet ID to be updated and specify the changes to be made. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned.
For fleets with remote locations, port setting updates can take time to propagate across all locations. You can check the status of updates in each location by calling DescribeFleetPortSettings with a location name. Learn more Setting up Amazon GameLift fleets @Sendable @inlinable public func updateFleetPortSettings(_ input: UpdateFleetPortSettingsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateFleetPortSettingsOutput { @@ -4087,7 +4122,7 @@ public struct GameLift: AWSService { logger: logger ) } - /// Updates permissions that allow inbound traffic to connect to game sessions in the fleet. To update settings, specify the fleet ID to be updated and specify the changes to be made. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. For a container fleet, inbound permissions must specify port numbers that are defined in the fleet's connection port settings. If successful, the fleet ID for the updated fleet is returned. For fleets with remote locations, port setting updates can take time to propagate across all locations. You can check the status of updates in each location by calling DescribeFleetPortSettings with a location name. Learn more Setting up Amazon GameLift fleets + /// Updates permissions that allow inbound traffic to connect to game sessions in the fleet. To update settings, specify the fleet ID to be updated and specify the changes to be made. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions. If successful, the fleet ID for the updated fleet is returned. For fleets with remote locations, port setting updates can take time to propagate across all locations. You can check the status of updates in each location by calling DescribeFleetPortSettings with a location name. Learn more Setting up Amazon GameLift fleets /// /// Parameters: /// - fleetId: A unique identifier for the fleet to update port settings for. You can use either the fleet ID or ARN value. 
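To round out the port-settings discussion above, a hedged sketch that opens a UDP range and revokes an old TCP rule in one call. The IpPermission fields (fromPort, toPort, ipRange, protocol) are assumed from the GameLift API; the fleet ID and ranges are hypothetical.

import SotoGameLift

let gameLift = GameLift(client: AWSClient(), region: .useast1)

_ = try await gameLift.updateFleetPortSettings(
    fleetId: "fleet-1234abcd",  // hypothetical fleet ID
    inboundPermissionAuthorizations: [
        .init(fromPort: 33435, ipRange: "203.0.113.0/24", protocol: .udp, toPort: 33535)
    ],
    inboundPermissionRevocations: [
        // A revocation must match an existing fleet permission exactly.
        .init(fromPort: 1935, ipRange: "0.0.0.0/0", protocol: .tcp, toPort: 1935)
    ]
)

As the doc comment notes, updates can take time to propagate to remote locations; a client can verify each location with DescribeFleetPortSettings and a location name.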
diff --git a/Sources/Soto/Services/GameLift/GameLift_shapes.swift b/Sources/Soto/Services/GameLift/GameLift_shapes.swift index 6332b1188e..b77089166f 100644 --- a/Sources/Soto/Services/GameLift/GameLift_shapes.swift +++ b/Sources/Soto/Services/GameLift/GameLift_shapes.swift @@ -607,7 +607,9 @@ extension GameLift { } public enum GameSessionStatusReason: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case forceTerminated = "FORCE_TERMINATED" case interrupted = "INTERRUPTED" + case triggeredOnProcessTerminate = "TRIGGERED_ON_PROCESS_TERMINATE" public var description: String { return self.rawValue } } @@ -690,6 +692,12 @@ extension GameLift { public var description: String { return self.rawValue } } + public enum PlacementFallbackStrategy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case defaultAfterSinglePass = "DEFAULT_AFTER_SINGLE_PASS" + case none = "NONE" + public var description: String { return self.rawValue } + } + public enum PlayerSessionCreationPolicy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case acceptAll = "ACCEPT_ALL" case denyAll = "DENY_ALL" @@ -754,6 +762,12 @@ extension GameLift { public var description: String { return self.rawValue } } + public enum TerminationMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case forceTerminate = "FORCE_TERMINATE" + case triggerOnProcessTerminate = "TRIGGER_ON_PROCESS_TERMINATE" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct AcceptMatchInput: AWSEncodableShape { @@ -957,7 +971,7 @@ extension GameLift { } public struct CertificateConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Indicates whether a TLS/SSL certificate is generated for a fleet. Valid values include: GENERATED -- Generate a TLS/SSL certificate for this fleet. DISABLED -- (default) Do not generate a TLS/SSL certificate for this fleet. + /// Indicates whether a TLS/SSL certificate is generated for a fleet. Valid values include: GENERATED - Generate a TLS/SSL certificate for this fleet. DISABLED - (default) Do not generate a TLS/SSL certificate for this fleet. public let certificateType: CertificateType? @inlinable @@ -1584,7 +1598,7 @@ extension GameLift { public struct CreateBuildInput: AWSEncodableShape { /// A descriptive label that is associated with a build. Build names do not need to be unique. You can change this value later. public let name: String? - /// The environment that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. This parameter is required, and there's no default value. You can't change a build's operating system later. Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5. + /// The operating system that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. You must specify a valid operating system in this request. There is no default value. 
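Since the CreateBuildInput change in this hunk stresses that the operating system has no default, here is a hedged sketch of an explicit choice. The build name, S3 location, and role ARN are hypothetical, and the AL2023 case name assumes Soto's usual enum mapping for AMAZON_LINUX_2023.

import SotoGameLift

let gameLift = GameLift(client: AWSClient(), region: .useast1)

let created = try await gameLift.createBuild(
    name: "MyGame-nightly",             // hypothetical build name
    operatingSystem: .amazonLinux2023,  // required: there is no default value
    serverSdkVersion: "5.1.0",
    storageLocation: .init(
        bucket: "my-build-bucket",      // hypothetical S3 location
        key: "builds/mygame.zip",
        roleArn: "arn:aws:iam::111122223333:role/gamelift-s3-access"
    )
)
print(created.build?.buildId ?? "unknown")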
You can't change a build's operating system later. Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5. public let operatingSystem: OperatingSystem? /// A server SDK version you used when integrating your game server build with Amazon GameLift. For more information, see Integrate games with custom game servers. By default Amazon GameLift sets this value to 4.0.2. public let serverSdkVersion: String? @@ -1658,7 +1672,7 @@ extension GameLift { public let description: String? /// The unique identifier for an Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift. Use an IAM service role with the GameLiftContainerFleetPolicy managed policy attached. For more information, see Set up an IAM service role. You can't change this fleet property after the fleet is created. IAM role ARN values use the following pattern: arn:aws:iam::[Amazon Web Services account]:role/[role name]. public let fleetRoleArn: String? - /// A container group definition resource that describes how to deploy containers with your game server build and support software onto each fleet instance. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number. Create a container group definition by calling CreateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource. + /// A container group definition resource that describes how to deploy containers with your game server build and support software onto each fleet instance. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number. Create a container group definition by calling CreateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource. public let gameServerContainerGroupDefinitionName: String? /// The number of times to replicate the game server container group on each fleet instance. By default, Amazon GameLift calculates the maximum number of game server container groups that can fit on each instance. This calculation is based on the CPU and memory resources of the fleet's instance type. To use the calculated maximum, don't set this parameter. If you set this number manually, Amazon GameLift uses your value as long as it's less than the calculated maximum. public let gameServerContainerGroupsPerInstance: Int? @@ -1678,7 +1692,7 @@ extension GameLift { public let metricGroups: [String]? /// Determines whether Amazon GameLift can shut down game sessions on the fleet that are actively running and hosting players. Amazon GameLift might prompt an instance shutdown when scaling down fleet capacity or when retiring unhealthy instances. You can also set game session protection for individual game sessions using UpdateGameSession. NoProtection -- Game sessions can be shut down during active gameplay. FullProtection -- Game sessions in ACTIVE status can't be shut down. By default, this property is set to NoProtection. public let newGameSessionProtectionPolicy: ProtectionPolicy? - /// The name of a container group definition resource that describes a set of axillary software.
A fleet instance has one process for executables in this container group. A per-instance container group is optional. You can update the fleet to add or remove a per-instance container group at any time. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number. Create a container group definition by calling CreateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource. + /// The name of a container group definition resource that describes a set of auxiliary software. A fleet instance has one process for executables in this container group. A per-instance container group is optional. You can update the fleet to add or remove a per-instance container group at any time. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number. Create a container group definition by calling https://docs.aws.amazon.com/gamelift/latest/apireference/API_CreateContainerGroupDefinition.html. This operation creates a https://docs.aws.amazon.com/gamelift/latest/apireference/API_ContainerGroupDefinition.html resource. public let perInstanceContainerGroupDefinitionName: String? /// A list of labels to assign to the new fleet resource. Tags are developer-defined key-value pairs. Tagging Amazon Web Services resources is useful for resource management, access management and cost allocation. For more information, see Tagging Amazon Web Services Resources in the Amazon Web Services General Reference. public let tags: [Tag]? @@ -1705,9 +1719,9 @@ extension GameLift { public func validate(name: String) throws { try self.validate(self.description, name: "description", parent: name, max: 1024) try self.validate(self.description, name: "description", parent: name, min: 1) - try self.validate(self.fleetRoleArn, name: "fleetRoleArn", parent: name, max: 512) + try self.validate(self.fleetRoleArn, name: "fleetRoleArn", parent: name, max: 256) try self.validate(self.fleetRoleArn, name: "fleetRoleArn", parent: name, min: 1) - try self.validate(self.fleetRoleArn, name: "fleetRoleArn", parent: name, pattern: "^[a-zA-Z0-9:/-]+$") + try self.validate(self.fleetRoleArn, name: "fleetRoleArn", parent: name, pattern: "^arn:.*:role\\/[\\w+=,.@-]+$") try self.validate(self.gameServerContainerGroupDefinitionName, name: "gameServerContainerGroupDefinitionName", parent: name, max: 512) try self.validate(self.gameServerContainerGroupDefinitionName, name: "gameServerContainerGroupDefinitionName", parent: name, min: 1) try self.validate(self.gameServerContainerGroupDefinitionName, name: "gameServerContainerGroupDefinitionName", parent: name, pattern: "^[a-zA-Z0-9\\-]+$|^arn:.*:containergroupdefinition\\/[a-zA-Z0-9\\-]+(:[0-9]+)?$") @@ -1862,11 +1876,11 @@ extension GameLift { public let buildId: String? /// Prompts Amazon GameLift to generate a TLS/SSL certificate for the fleet. Amazon GameLift uses the certificates to encrypt traffic between game clients and the game servers running on Amazon GameLift. By default, the CertificateConfiguration is DISABLED. You can't change this property after you create the fleet. Certificate Manager (ACM) certificates expire after 13 months. Certificate expiration can cause fleets to fail, preventing players from connecting to instances in the fleet. We recommend you replace fleets before 13 months; consider using fleet aliases for a smooth transition. ACM isn't available in all Amazon Web Services regions.
A fleet creation request with certificate generation enabled in an unsupported Region fails with a 4xx error. For more information about the supported Regions, see Supported Regions in the Certificate Manager User Guide. public let certificateConfiguration: CertificateConfiguration? - /// The type of compute resource used to host your game servers. EC2 – The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting. ANYWHERE – Your game server and supporting software is deployed to compute resources that are provided and managed by you. With this compute type, you can also set the AnywhereConfiguration parameter. + /// The type of compute resource used to host your game servers. EC2 – The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting. ANYWHERE – Game servers and supporting software are deployed to compute resources that you provide and manage. With this compute type, you can also set the AnywhereConfiguration parameter. public let computeType: ComputeType? /// A description for the fleet. public let description: String? - /// The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call UpdateFleetPortSettings to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges. + /// The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetPortSettings to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges. public let ec2InboundPermissions: [IpPermission]? /// The Amazon GameLift-supported Amazon EC2 instance type to use with managed EC2 fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See Amazon Elastic Compute Cloud Instance Types for detailed descriptions of Amazon EC2 instance types. public let ec2InstanceType: EC2InstanceType? @@ -4876,7 +4890,7 @@ extension GameLift { public struct Event: AWSDecodableShape { /// The number of times that this event occurred. public let count: Int64? - /// The type of event being logged. Fleet state transition events: FLEET_CREATED -- A fleet resource was successfully created with a status of NEW. Event messaging includes the fleet ID. FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. Amazon GameLift is downloading the compressed build and running install scripts. FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. Amazon GameLift has successfully installed build and is now validating the build files. FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING.
Amazon GameLift has successfully verified the build files and is now launching a fleet instance. FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. Amazon GameLift is launching a game server process on the fleet instance and is testing its connectivity with the Amazon GameLift service. FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions. FLEET_STATE_ERROR -- The Fleet's status changed to ERROR. Describe the fleet event message for more details. Fleet creation events (ordered by fleet creation activity): FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance. FLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully downloaded to an instance, and Amazon GameLiftis now extracting the build files from the uploaded build. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl. FLEET_CREATION_RUNNING_INSTALLER -- The game server build files were successfully extracted, and Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl. FLEET_CREATION_COMPLETED_INSTALLER -- The game server build files were successfully installed and validation of the installation will begin soon. FLEET_CREATION_FAILED_INSTALLER -- The installed failed while attempting to install the build files. This event indicates that the failure occurred before Amazon GameLift could start validation. FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and the GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl. FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance. FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance. FLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation timed out. Try fleet creation again. FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. For more information, see Debug Fleet Creation Issues. FLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain any instances based on the input fleet attributes. Try again at a different time or choose a different combination of fleet attributes such as fleet type, instance type, etc. FLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet creation. 
Describe the fleet event message for more details. VPC peering events: FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your Amazon Web Services account. FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted. Container group events: CONTAINER_GROUP_REGISTRATION_FAILED – A game server container group started, but timed out before calling RegisterCompute. CONTAINER_GROUP_CRASHED A game server container group started and terminated without calling RegisterCompute. Spot instance events: INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification. INSTANCE_RECYCLED -- A spot instance was determined to have a high risk of interruption and is scheduled to be recycled once it has no active game sessions. Server process events: SERVER_PROCESS_INVALID_PATH -- The game server executable or script could not be found based on the Fleet runtime configuration. Check that the launch path is correct based on the operating system of the Fleet. SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call InitSDK() within the time expected (5 minutes). Check your game session log to see why InitSDK() was not called in time. SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call ProcessReady() within the time expected (5 minutes) after calling InitSDK(). Check your game session log to see why ProcessReady() was not called in time. SERVER_PROCESS_CRASHED -- The server process exited without calling ProcessEnding(). Check your game session log to see why ProcessEnding() was not called. SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a valid health check for too long and was therefore terminated by GameLift. Check your game session log to see if the thread became stuck processing a synchronous task for too long. SERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly within the time expected after OnProcessTerminate() was sent. Check your game session log to see why termination took longer than expected. SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly within the time expected (30 seconds) after calling ProcessEnding(). Check your game session log to see why termination took longer than expected. Game session events: GAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the expected time. Check your game session log to see why ActivateGameSession() took longer to complete than expected. Other fleet events: FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings. FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting. FLEET_DELETED -- A request to delete a fleet was initiated. GENERIC_EVENT -- An unspecified event has occurred. 
+ /// The type of event being logged. Fleet state transition events: FLEET_CREATED -- A fleet resource was successfully created with a status of NEW. Event messaging includes the fleet ID. FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. Amazon GameLift is downloading the compressed build and running install scripts. FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. Amazon GameLift has successfully installed the build and is now validating the build files. FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING. Amazon GameLift has successfully verified the build files and is now launching a fleet instance. FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. Amazon GameLift is launching a game server process on the fleet instance and is testing its connectivity with the Amazon GameLift service. FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions. FLEET_STATE_ERROR -- The fleet's status changed to ERROR. Describe the fleet event message for more details. Fleet creation events (ordered by fleet creation activity): FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance. FLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully downloaded to an instance, and Amazon GameLift is now extracting the build files from the uploaded build. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl. FLEET_CREATION_RUNNING_INSTALLER -- The game server build files were successfully extracted, and Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl. FLEET_CREATION_COMPLETED_INSTALLER -- The game server build files were successfully installed and validation of the installation will begin soon. FLEET_CREATION_FAILED_INSTALLER -- The installer failed while attempting to install the build files. This event indicates that the failure occurred before Amazon GameLift could start validation. FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl. FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance. FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance. FLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation timed out. Try fleet creation again.
FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. For more information, see Debug Fleet Creation Issues. FLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain any instances based on the input fleet attributes. Try again at a different time or choose a different combination of fleet attributes such as fleet type, instance type, etc. FLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet creation. Describe the fleet event message for more details. VPC peering events: FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your Amazon Web Services account. FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted. Spot instance events: INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification. INSTANCE_RECYCLED -- A spot instance was determined to have a high risk of interruption and is scheduled to be recycled once it has no active game sessions. Server process events: SERVER_PROCESS_INVALID_PATH -- The game server executable or script could not be found based on the Fleet runtime configuration. Check that the launch path is correct based on the operating system of the Fleet. SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call InitSDK() within the time expected (5 minutes). Check your game session log to see why InitSDK() was not called in time. SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call ProcessReady() within the time expected (5 minutes) after calling InitSDK(). Check your game session log to see why ProcessReady() was not called in time. SERVER_PROCESS_CRASHED -- The server process exited without calling ProcessEnding(). Check your game session log to see why ProcessEnding() was not called. SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a valid health check for too long and was therefore terminated by GameLift. Check your game session log to see if the thread became stuck processing a synchronous task for too long. SERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly within the time expected after OnProcessTerminate() was sent. Check your game session log to see why termination took longer than expected. SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly within the time expected (30 seconds) after calling ProcessEnding(). Check your game session log to see why termination took longer than expected. Game session events: GAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the expected time. Check your game session log to see why ActivateGameSession() took longer to complete than expected. 
Other fleet events: FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings. FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting. FLEET_DELETED -- A request to delete a fleet was initiated. GENERIC_EVENT -- An unspecified event has occurred. public let eventCode: EventCode? /// A unique identifier for a fleet event. public let eventId: String? @@ -4936,7 +4950,7 @@ extension GameLift { } public struct FleetAttributes: AWSDecodableShape { - /// Amazon GameLift Anywhere configuration options. + /// A set of attributes that are specific to an Anywhere fleet. public let anywhereConfiguration: AnywhereConfiguration? /// The Amazon Resource Name (ARN) associated with the Amazon GameLift build resource that is deployed on instances in this fleet. In a GameLift build ARN, the resource ID matches the BuildId value. This attribute is used with fleets where ComputeType is "EC2". public let buildArn: String? @@ -4956,21 +4970,21 @@ extension GameLift { public let fleetId: String? /// Indicates whether the fleet uses On-Demand or Spot instances. For more information, see On-Demand versus Spot Instances. This fleet property can't be changed after the fleet is created. public let fleetType: FleetType? - /// A unique identifier for an IAM role that manages access to your Amazon Web Services services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN by using the IAM dashboard in the Amazon Web Services Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server. This attribute is used with fleets where ComputeType is "EC2". + /// A unique identifier for an IAM role that manages access to your Amazon Web Services services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN by using the IAM dashboard in the Amazon Web Services Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server. This attribute is used with fleets where ComputeType is EC2. public let instanceRoleArn: String? - /// Indicates that fleet instances maintain a shared credentials file for the IAM role defined in InstanceRoleArn. Shared credentials allow applications that are deployed with the game server executable to communicate with other Amazon Web Services resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see Communicate with other Amazon Web Services resources from your fleets. This attribute is used with fleets where ComputeType is "EC2". + /// Indicates that fleet instances maintain a shared credentials file for the IAM role defined in InstanceRoleArn. Shared credentials allow applications that are deployed with the game server executable to communicate with other Amazon Web Services resources. This property is used only when the game server is integrated with the server SDK version 5.x. 
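As a quick orientation for readers, here is a minimal sketch of how these event codes surface through the generated client. It is illustrative only and not part of this diff: the AWSClient setup varies across Soto releases, the fleet ID is a hypothetical placeholder, and the method follows Soto's generated describeFleetEvents pattern.

import SotoGameLift

let client = AWSClient(httpClientProvider: .createNew)  // client setup varies across Soto versions
defer { try? client.syncShutdown() }
let gameLift = GameLift(client: client, region: .uswest2)

// Fetch recent events for a fleet and react to a few of the codes documented above.
let output = try await gameLift.describeFleetEvents(fleetId: "fleet-1234", limit: 50)
for event in output.events ?? [] {
    guard let code = event.eventCode else { continue }
    switch code {
    case .fleetStateActive:
        print("Fleet is ready to host game sessions")
    case .fleetActivationFailed, .fleetStateError:
        print("Fleet problem: \(event.message ?? "no message")")
    default:
        break
    }
}

Later sketches in this review reuse the client and gameLift values shown here.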
For more information about using shared credentials, see Communicate with other Amazon Web Services resources from your fleets. This attribute is used with fleets where ComputeType is EC2. public let instanceRoleCredentialsProvider: InstanceRoleCredentialsProvider? - /// The Amazon EC2 instance type that the fleet uses. Instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. See Amazon Elastic Compute Cloud Instance Types for detailed descriptions. This attribute is used with fleets where ComputeType is "EC2". + /// The Amazon EC2 instance type that the fleet uses. Instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. See Amazon Elastic Compute Cloud Instance Types for detailed descriptions. This attribute is used with fleets where ComputeType is EC2. public let instanceType: EC2InstanceType? /// This parameter is no longer used. Game session log paths are now defined using the Amazon GameLift server API ProcessReady() logParameters. See more information in the Server API Reference. public let logPaths: [String]? - /// Name of a metric group that metrics for this fleet are added to. In Amazon CloudWatch, you can view aggregated metrics for fleets that are in a metric group. A fleet can be included in only one metric group at a time. This attribute is used with fleets where ComputeType is "EC2". + /// Name of a metric group that metrics for this fleet are added to. In Amazon CloudWatch, you can view aggregated metrics for fleets that are in a metric group. A fleet can be included in only one metric group at a time. This attribute is used with fleets where ComputeType is EC2. public let metricGroups: [String]? /// A descriptive label that is associated with a fleet. Fleet names do not need to be unique. public let name: String? - /// The type of game session protection to set on all new instances that are started in the fleet. This attribute is used with fleets where ComputeType is "EC2". NoProtection -- The game session can be terminated during a scale-down event. FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event. + /// The type of game session protection to set on all new instances that are started in the fleet. This attribute is used with fleets where ComputeType is EC2. NoProtection -- The game session can be terminated during a scale-down event. FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event. public let newGameSessionProtectionPolicy: ProtectionPolicy? - /// The operating system of the fleet's computing resources. A fleet's operating system is determined by the OS of the build or script that is deployed on this fleet. This attribute is used with fleets where ComputeType is "EC2". Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5. + /// The operating system of the fleet's computing resources. A fleet's operating system is determined by the OS of the build or script that is deployed on this fleet. This attribute is used with fleets where ComputeType is EC2. Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. 
See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5. public let operatingSystem: OperatingSystem? public let resourceCreationLimitPolicy: ResourceCreationLimitPolicy? /// The Amazon Resource Name (ARN) associated with the GameLift script resource that is deployed on instances in this fleet. In a GameLift script ARN, the resource ID matches the ScriptId value. @@ -4983,7 +4997,7 @@ extension GameLift { public let serverLaunchPath: String? /// Current status of the fleet. Possible fleet statuses include the following: NEW -- A new fleet resource has been defined and Amazon GameLift has started creating the fleet. Desired instances is set to 1. DOWNLOADING/VALIDATING/BUILDING -- Amazon GameLift is downloading the game server build, running install scripts, and then validating the build files. When complete, Amazon GameLift launches a fleet instance. ACTIVATING -- Amazon GameLift is launching a game server process and testing its connectivity with the Amazon GameLift service. ACTIVE -- The fleet is now ready to host game sessions. ERROR -- An error occurred when downloading, validating, building, or activating the fleet. DELETING -- Hosts are responding to a delete fleet request. TERMINATED -- The fleet no longer exists. public let status: FleetStatus? - /// A list of fleet activity that has been suspended using StopFleetActions. This includes fleet auto-scaling. This attribute is used with fleets where ComputeType is "EC2". + /// A list of fleet activity that has been suspended using StopFleetActions. This includes fleet auto-scaling. This attribute is used with fleets where ComputeType is EC2. public let stoppedActions: [FleetAction]? /// A time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example "1469498468.057"). public let terminationTime: Date? @@ -5097,9 +5111,9 @@ public let gameServerBinaryArn: String? /// The unique identifier for the version of the per-instance container group definition that is being deployed. public let perInstanceBinaryArn: String? - /// The unique identifier for the version of the game server container group definition to roll back to if deployment fails. + /// The unique identifier for the version of the game server container group definition to roll back to if deployment fails. Amazon GameLift sets this property to the container group definition version that the fleet used when it was last active. public let rollbackGameServerBinaryArn: String? - /// The unique identifier for the version of the per-instance container group definition to roll back to if deployment fails. + /// The unique identifier for the version of the per-instance container group definition to roll back to if deployment fails. Amazon GameLift sets this property to the container group definition version that the fleet used when it was last active. public let rollbackPerInstanceBinaryArn: String? @inlinable @@ -5512,13 +5526,13 @@ extension GameLift { public let maximumPlayerSessionCount: Int? /// A descriptive label that is associated with a game session. Session names do not need to be unique. public let name: String? - /// Indicates whether or not the game session is accepting new players. + /// Indicates whether the game session is accepting new players. 
public let playerSessionCreationPolicy: PlayerSessionCreationPolicy? /// The port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number. public let port: Int? /// Current status of the game session. A game session must have an ACTIVE status to have player sessions. public let status: GameSessionStatus? - /// Provides additional information about game session status. INTERRUPTED indicates that the game session was hosted on a spot instance that was reclaimed, causing the active game session to be terminated. + /// Provides additional information about game session status. INTERRUPTED -- The game session was hosted on an EC2 Spot instance that was reclaimed, causing the active game session to be stopped. TRIGGERED_ON_PROCESS_TERMINATE -- The game session was stopped by calling TerminateGameSession with the termination mode TRIGGER_ON_PROCESS_TERMINATE. FORCE_TERMINATED -- The game session was stopped by calling TerminateGameSession with the termination mode FORCE_TERMINATE. public let statusReason: GameSessionStatusReason? /// A time stamp indicating when this data object was terminated. Format is a number expressed in Unix time as milliseconds (for example "1469498468.057"). public let terminationTime: Date? @@ -5669,17 +5683,19 @@ extension GameLift { public let placedPlayerSessions: [PlacedPlayerSession]? /// A unique identifier for a game session placement. public let placementId: String? - /// A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to @aws; Regions. + /// A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions. public let playerLatencies: [PlayerLatency]? /// The port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number. This value isn't final until placement status is FULFILLED. public let port: Int? + /// A prioritized list of locations to use with a game session placement request and instructions on how to use it. This list overrides a queue's prioritized location list for a single game session placement request only. The list can include Amazon Web Services Regions, local zones, and custom locations (for Anywhere fleets). The fallback strategy instructs Amazon GameLift to use the override list for the first placement attempt only or for all placement attempts. + public let priorityConfigurationOverride: PriorityConfigurationOverride? /// Time stamp indicating when this request was placed in the queue. Format is a number expressed in Unix time as milliseconds (for example "1469498468.057"). public let startTime: Date? - /// Current status of the game session placement request. PENDING -- The placement request is in the queue waiting to be processed. Game session properties are not yet final. FULFILLED -- A new game session has been successfully placed. Game session properties are now final. CANCELLED -- The placement request was canceled. TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit as a new placement request as needed. FAILED -- Amazon GameLift is not able to complete the process of placing the game session. Common reasons are the game session terminated before the placement process was completed, or an unexpected internal error. + /// Current status of the game session placement request. 
PENDING -- The placement request is in the queue waiting to be processed. Game session properties are not yet final. FULFILLED -- A new game session has been successfully placed. Game session properties are now final. CANCELLED -- The placement request was canceled. TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit the placement request as needed. FAILED -- Amazon GameLift is not able to complete the process of placing the game session. Common reasons are the game session terminated before the placement process was completed, or an unexpected internal error. public let status: GameSessionPlacementState? @inlinable - public init(dnsName: String? = nil, endTime: Date? = nil, gameProperties: [GameProperty]? = nil, gameSessionArn: String? = nil, gameSessionData: String? = nil, gameSessionId: String? = nil, gameSessionName: String? = nil, gameSessionQueueName: String? = nil, gameSessionRegion: String? = nil, ipAddress: String? = nil, matchmakerData: String? = nil, maximumPlayerSessionCount: Int? = nil, placedPlayerSessions: [PlacedPlayerSession]? = nil, placementId: String? = nil, playerLatencies: [PlayerLatency]? = nil, port: Int? = nil, startTime: Date? = nil, status: GameSessionPlacementState? = nil) { + public init(dnsName: String? = nil, endTime: Date? = nil, gameProperties: [GameProperty]? = nil, gameSessionArn: String? = nil, gameSessionData: String? = nil, gameSessionId: String? = nil, gameSessionName: String? = nil, gameSessionQueueName: String? = nil, gameSessionRegion: String? = nil, ipAddress: String? = nil, matchmakerData: String? = nil, maximumPlayerSessionCount: Int? = nil, placedPlayerSessions: [PlacedPlayerSession]? = nil, placementId: String? = nil, playerLatencies: [PlayerLatency]? = nil, port: Int? = nil, priorityConfigurationOverride: PriorityConfigurationOverride? = nil, startTime: Date? = nil, status: GameSessionPlacementState? = nil) { self.dnsName = dnsName self.endTime = endTime self.gameProperties = gameProperties @@ -5696,6 +5712,7 @@ extension GameLift { self.placementId = placementId self.playerLatencies = playerLatencies self.port = port + self.priorityConfigurationOverride = priorityConfigurationOverride self.startTime = startTime self.status = status } @@ -5717,6 +5734,7 @@ extension GameLift { case placementId = "PlacementId" case playerLatencies = "PlayerLatencies" case port = "Port" + case priorityConfigurationOverride = "PriorityConfigurationOverride" case startTime = "StartTime" case status = "Status" } @@ -5791,7 +5809,7 @@ extension GameLift { } public struct GetComputeAccessInput: AWSEncodableShape { - /// A unique identifier for the compute resource that you want to connect to. For an EC2 fleet compute, use the instance ID. Use ListCompute to retrieve compute identifiers. + /// A unique identifier for the compute resource that you want to connect to. For an EC2 fleet compute, use the instance ID. Use https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute.html to retrieve compute identifiers. public let computeName: String? /// A unique identifier for the fleet that holds the compute resource that you want to connect to. You can use either the fleet ID or ARN value. public let fleetId: String? @@ -7120,7 +7138,7 @@ extension GameLift { } public struct Player: AWSEncodableShape & AWSDecodableShape { - /// A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to @aws; Regions. 
If this property is present, FlexMatch considers placing the match only in Regions for which latency is reported. If a matchmaker has a rule that evaluates player latency, players must report latency in order to be matched. If no latency is reported in this scenario, FlexMatch assumes that no Regions are available to the player and the ticket is not matchable. + /// A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions. If this property is present, FlexMatch considers placing the match only in Regions for which latency is reported. If a matchmaker has a rule that evaluates player latency, players must report latency in order to be matched. If no latency is reported in this scenario, FlexMatch assumes that no Regions are available to the player and the ticket is not matchable. public let latencyInMs: [String: Int]? /// A collection of key:value pairs containing player information for use in matchmaking. Player attribute keys must match the playerAttributes used in a matchmaking rule set. Example: "PlayerAttributes": {"skill": {"N": "23"}, "gameMode": {"S": "deathmatch"}}. You can provide up to 10 PlayerAttributes. public let playerAttributes: [String: AttributeValue]? @@ -7272,9 +7290,9 @@ extension GameLift { } public struct PriorityConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The prioritization order to use for fleet locations, when the PriorityOrder property includes LOCATION. Locations are identified by Amazon Web Services Region codes such as us-west-2. Each location can only be listed once. + /// The prioritization order to use for fleet locations, when the PriorityOrder property includes LOCATION. Locations can include Amazon Web Services Region codes (such as us-west-2), local zones, and custom locations (for Anywhere fleets). Each location must be listed only once. For details, see Amazon GameLift service locations. public let locationOrder: [String]? - /// The recommended sequence to use when prioritizing where to place new game sessions. Each type can only be listed once. LATENCY -- FleetIQ prioritizes locations where the average player latency (provided in each game session request) is lowest. COST -- FleetIQ prioritizes destinations with the lowest current hosting costs. Cost is evaluated based on the location, instance type, and fleet type (Spot or On-Demand) for each destination in the queue. DESTINATION -- FleetIQ prioritizes based on the order that destinations are listed in the queue configuration. LOCATION -- FleetIQ prioritizes based on the provided order of locations, as defined in LocationOrder. + /// A custom sequence to use when prioritizing where to place new game sessions. Each priority type is listed once. LATENCY -- Amazon GameLift prioritizes locations where the average player latency is lowest. Player latency data is provided in each game session placement request. COST -- Amazon GameLift prioritizes destinations with the lowest current hosting costs. Cost is evaluated based on the location, instance type, and fleet type (Spot or On-Demand) of each destination in the queue. DESTINATION -- Amazon GameLift prioritizes based on the list order of destinations in the queue configuration. LOCATION -- Amazon GameLift prioritizes based on the provided order of locations, as defined in LocationOrder. public let priorityOrder: [PriorityType]? 
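To illustrate the updated LocationOrder semantics, here is a brief sketch of a queue priority configuration; it is not part of this diff, and the location names are hypothetical (the custom location follows the Anywhere naming convention).

let priority = GameLift.PriorityConfiguration(
    // A Region code, a local zone, and a custom Anywhere location, in preference order.
    locationOrder: ["us-west-2", "us-west-2-lax-1", "custom-location-1"],
    // Prefer low player latency first, then the explicit location order above.
    priorityOrder: [.latency, .location]
)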
@inlinable @@ -7301,6 +7319,34 @@ extension GameLift { } } + public struct PriorityConfigurationOverride: AWSEncodableShape & AWSDecodableShape { + /// A prioritized list of hosting locations. The list can include Amazon Web Services Regions (such as us-west-2), local zones, and custom locations (for Anywhere fleets). Each location must be listed only once. For details, see Amazon GameLift service locations. + public let locationOrder: [String]? + /// Instructions for how to use the override list if the first round of placement attempts fails. The first round is a failure if Amazon GameLift searches all listed locations, in all of the queue's destinations, without finding an available hosting resource for a new game session. Valid strategies include: DEFAULT_AFTER_SINGLE_PASS -- After the first round of placement attempts, discard the override list and use the queue's default location priority list. Continue to use the queue's default list until the placement request times out. NONE -- Continue to use the override list for all rounds of placement attempts until the placement request times out. + public let placementFallbackStrategy: PlacementFallbackStrategy? + + @inlinable + public init(locationOrder: [String]? = nil, placementFallbackStrategy: PlacementFallbackStrategy? = nil) { + self.locationOrder = locationOrder + self.placementFallbackStrategy = placementFallbackStrategy + } + + public func validate(name: String) throws { + try self.locationOrder?.forEach { + try validate($0, name: "locationOrder[]", parent: name, max: 64) + try validate($0, name: "locationOrder[]", parent: name, min: 1) + try validate($0, name: "locationOrder[]", parent: name, pattern: "^[A-Za-z0-9\\-]+$") + } + try self.validate(self.locationOrder, name: "locationOrder", parent: name, max: 10) + try self.validate(self.locationOrder, name: "locationOrder", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case locationOrder = "LocationOrder" + case placementFallbackStrategy = "PlacementFallbackStrategy" + } + } + public struct PutScalingPolicyInput: AWSEncodableShape { /// Comparison operator to use when measuring the metric against the threshold value. public let comparisonOperator: ComparisonOperatorType? @@ -7670,7 +7716,7 @@ extension GameLift { public struct RuntimeConfiguration: AWSEncodableShape & AWSDecodableShape { /// The maximum amount of time (in seconds) allowed to launch a new game session and have it report ready to host players. During this time, the game session is in status ACTIVATING. If the game session does not become active before the timeout, it is ended and the game session status is changed to TERMINATED. public let gameSessionActivationTimeoutSeconds: Int? - /// The number of game sessions in status ACTIVATING to allow on an instance. This setting limits the instance resources that can be used for new game activations at any one time. + /// The number of game sessions in status ACTIVATING to allow on an instance or compute. This setting limits the instance resources that can be used for new game activations at any one time. public let maxConcurrentGameSessionActivations: Int? /// A collection of server process configurations that identify what server processes to run on fleet computes. public let serverProcesses: [ServerProcess]? @@ -8009,11 +8055,13 @@ extension GameLift { public let maximumPlayerSessionCount: Int? /// A unique identifier to assign to the new game session placement. This value is developer-defined. 
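A sketch of how the new override is intended to pair with a placement request, reusing the gameLift client from the earlier sketch; the queue name, placement ID, and locations are hypothetical, and the call follows Soto's generated method pattern.

let override = GameLift.PriorityConfigurationOverride(
    locationOrder: ["us-east-1", "us-west-2"],
    // Fall back to the queue's default priority list after one full pass.
    placementFallbackStrategy: .defaultAfterSinglePass
)
let placement = try await gameLift.startGameSessionPlacement(
    gameSessionQueueName: "my-queue",
    maximumPlayerSessionCount: 10,
    placementId: "placement-0001",
    priorityConfigurationOverride: override
)
if let status = placement.gameSessionPlacement?.status {
    print("Placement status: \(status)")
}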
The value must be unique across all Regions and cannot be reused. public let placementId: String? - /// A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to @aws; Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players. + /// A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players. public let playerLatencies: [PlayerLatency]? + /// A prioritized list of locations to use for the game session placement and instructions on how to use it. This list overrides a queue's prioritized location list for this game session placement request only. You can include Amazon Web Services Regions, local zones, and custom locations (for Anywhere fleets). Choose a fallback strategy to instruct Amazon GameLift to use the override list for the first placement attempt only or for all placement attempts. + public let priorityConfigurationOverride: PriorityConfigurationOverride? @inlinable - public init(desiredPlayerSessions: [DesiredPlayerSession]? = nil, gameProperties: [GameProperty]? = nil, gameSessionData: String? = nil, gameSessionName: String? = nil, gameSessionQueueName: String? = nil, maximumPlayerSessionCount: Int? = nil, placementId: String? = nil, playerLatencies: [PlayerLatency]? = nil) { + public init(desiredPlayerSessions: [DesiredPlayerSession]? = nil, gameProperties: [GameProperty]? = nil, gameSessionData: String? = nil, gameSessionName: String? = nil, gameSessionQueueName: String? = nil, maximumPlayerSessionCount: Int? = nil, placementId: String? = nil, playerLatencies: [PlayerLatency]? = nil, priorityConfigurationOverride: PriorityConfigurationOverride? = nil) { self.desiredPlayerSessions = desiredPlayerSessions self.gameProperties = gameProperties self.gameSessionData = gameSessionData @@ -8022,6 +8070,7 @@ extension GameLift { self.maximumPlayerSessionCount = maximumPlayerSessionCount self.placementId = placementId self.playerLatencies = playerLatencies + self.priorityConfigurationOverride = priorityConfigurationOverride } public func validate(name: String) throws { @@ -8046,6 +8095,7 @@ extension GameLift { try self.playerLatencies?.forEach { try $0.validate(name: "\(name).playerLatencies[]") } + try self.priorityConfigurationOverride?.validate(name: "\(name).priorityConfigurationOverride") } private enum CodingKeys: String, CodingKey { @@ -8057,6 +8107,7 @@ extension GameLift { case maximumPlayerSessionCount = "MaximumPlayerSessionCount" case placementId = "PlacementId" case playerLatencies = "PlayerLatencies" + case priorityConfigurationOverride = "PriorityConfigurationOverride" } } @@ -8296,7 +8347,7 @@ extension GameLift { public let healthCheck: ContainerHealthCheck? /// The URI to the image that Amazon GameLift deploys to a container fleet. For a more specific identifier, see ResolvedImageDigest. public let imageUri: String? - /// The amount of memory that Amazon GameLift makes available to the container. If memory limits aren't set for an individual container, the container shares the container group's total memory allocation. Related data type: ContainerGroupDefinition$TotalMemoryLimitMebibytes + /// The amount of memory that Amazon GameLift makes available to the container. 
If memory limits aren't set for an individual container, the container shares the container group's total memory allocation. Related data type: ContainerGroupDefinition TotalMemoryLimitMebibytes public let memoryHardLimitMebibytes: Int? /// A mount point that binds a path inside the container to a file or directory on the host system and lets it access the file or directory. public let mountPoints: [ContainerMountPoint]? @@ -8304,7 +8355,7 @@ public let portConfiguration: ContainerPortConfiguration? /// A unique and immutable identifier for the container image. The digest is a SHA 256 hash of the container image manifest. public let resolvedImageDigest: String? - /// The number of vCPU units that are reserved for the container. If no resources are reserved, the container shares the total vCPU limit for the container group. Related data type: ContainerGroupDefinition$TotalVcpuLimit + /// The number of vCPU units that are reserved for the container. If no resources are reserved, the container shares the total vCPU limit for the container group. Related data type: ContainerGroupDefinition TotalVcpuLimit public let vcpu: Double? @inlinable @@ -8350,7 +8401,7 @@ public let healthCheck: ContainerHealthCheck? /// The location of the container image to deploy to a container fleet. Provide an image in an Amazon Elastic Container Registry public or private repository. The repository must be in the same Amazon Web Services account and Amazon Web Services Region where you're creating the container group definition. For limits on image size, see Amazon GameLift endpoints and quotas. You can use any of the following image URI formats: Image ID only: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID] Image ID and digest: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]@[digest] Image ID and tag: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]:[tag] public let imageUri: String? - /// A specified amount of memory (in MiB) to reserve for this container. If you don't specify a container-specific memory limit, the container shares the container group's total memory allocation. Related data type: ContainerGroupDefinition TotalMemoryLimitMebibytes + /// A specified amount of memory (in MiB) to reserve for this container. If you don't specify a container-specific memory limit, the container shares the container group's total memory allocation. Related data type: ContainerGroupDefinition TotalMemoryLimitMebibytes public let memoryHardLimitMebibytes: Int? /// A mount point that binds a path inside the container to a file or directory on the host system and lets it access the file or directory. public let mountPoints: [ContainerMountPoint]? @@ -8544,6 +8595,43 @@ } } + public struct TerminateGameSessionInput: AWSEncodableShape { + /// A unique identifier for the game session to be terminated. A game session ARN has the following format: arn:aws:gamelift:::gamesession//. + public let gameSessionId: String? + /// The method to use to terminate the game session. Available methods include: TRIGGER_ON_PROCESS_TERMINATE -- Prompts the Amazon GameLift service to send an OnProcessTerminate() callback to the server process and initiate the normal game session shutdown sequence. The OnProcessTerminate method, which is implemented in the game server code, must include a call to the server SDK action ProcessEnding(), which is how the server process signals to Amazon GameLift that a game session is ending. 
If the server process doesn't call ProcessEnding(), the game session termination won't conclude successfully. FORCE_TERMINATE -- Prompts the Amazon GameLift service to stop the server process immediately. Amazon GameLift takes action (depending on the type of fleet) to shut down the server process without the normal game session shutdown sequence. This method is not available for game sessions that are running on Anywhere fleets unless the fleet is deployed with the Amazon GameLift Agent. In this scenario, a force terminate request results in an invalid or bad request exception. + public let terminationMode: TerminationMode? + + @inlinable + public init(gameSessionId: String? = nil, terminationMode: TerminationMode? = nil) { + self.gameSessionId = gameSessionId + self.terminationMode = terminationMode + } + + public func validate(name: String) throws { + try self.validate(self.gameSessionId, name: "gameSessionId", parent: name, max: 512) + try self.validate(self.gameSessionId, name: "gameSessionId", parent: name, min: 1) + try self.validate(self.gameSessionId, name: "gameSessionId", parent: name, pattern: "^[a-zA-Z0-9:/-]+$") + } + + private enum CodingKeys: String, CodingKey { + case gameSessionId = "GameSessionId" + case terminationMode = "TerminationMode" + } + } + + public struct TerminateGameSessionOutput: AWSDecodableShape { + public let gameSession: GameSession? + + @inlinable + public init(gameSession: GameSession? = nil) { + self.gameSession = gameSession + } + + private enum CodingKeys: String, CodingKey { + case gameSession = "GameSession" + } + } + public struct UntagResourceRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) that uniquely identifies the Amazon GameLift resource that you want to remove tags from. Amazon GameLift includes resource ARNs in the data object for the resource. You can retrieve the ARN by calling a List or Describe operation for the resource type. public let resourceARN: String? @@ -8677,7 +8765,7 @@ public let description: String? /// A unique identifier for the container fleet to update. You can use either the fleet ID or ARN value. public let fleetId: String? - /// The name or ARN value of a new game server container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value. You can't remove a fleet's game server container group definition, you can only update or replace it with another definition. Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version. + /// The name or ARN value of a new game server container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value. You can't remove a fleet's game server container group definition; you can only update or replace it with another definition. Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version. public let gameServerContainerGroupDefinitionName: String? 
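For orientation, a minimal sketch of the new operation, reusing the gameLift client from the first sketch; the game session ARN is a hypothetical placeholder, and the method name follows Soto's generated pattern.

// Ask the server process to shut the session down gracefully via OnProcessTerminate().
let result = try await gameLift.terminateGameSession(
    gameSessionId: "arn:aws:gamelift:us-west-2::gamesession/fleet-1234/gsess-5678",
    terminationMode: .triggerOnProcessTerminate
)
// FORCE_TERMINATE skips the graceful sequence; note the Anywhere-fleet caveat above.
if let reason = result.gameSession?.statusReason {
    print("Status reason: \(reason)")
}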
/// The number of times to replicate the game server container group on each fleet instance. By default, Amazon GameLift calculates the maximum number of game server container groups that can fit on each instance. You can remove this property value to use the calculated value, or set it manually. If you set this number manually, Amazon GameLift uses your value as long as it's less than the calculated maximum. public let gameServerContainerGroupsPerInstance: Int? @@ -8695,7 +8783,7 @@ public let metricGroups: [String]? /// The game session protection policy to apply to all new game sessions that are started in this fleet. Game sessions that already exist are not affected. public let newGameSessionProtectionPolicy: ProtectionPolicy? - /// The name or ARN value of a new per-instance container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value. Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version. To remove a fleet's per-instance container group definition, leave this parameter empty and use the parameter RemoveAttributes. + /// The name or ARN value of a new per-instance container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value. Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version. To remove a fleet's per-instance container group definition, leave this parameter empty and use the parameter RemoveAttributes. public let perInstanceContainerGroupDefinitionName: String? /// If set, this update removes a fleet's per-instance container group definition. You can't remove a fleet's game server container group definition. public let removeAttributes: [ContainerFleetRemoveAttribute]? @@ -8870,7 +8958,7 @@ public let metricGroups: [String]? /// A descriptive label that is associated with a fleet. Fleet names do not need to be unique. public let name: String? - /// The game session protection policy to apply to all new game sessions created in this fleet. Game sessions that already exist are not affected. You can set protection for individual game sessions using UpdateGameSession. NoProtection -- The game session can be terminated during a scale-down event. FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event. + /// The game session protection policy to apply to all new game sessions created in this fleet. Game sessions that already exist are not affected. You can set protection for individual game sessions using UpdateGameSession. NoProtection -- The game session can be terminated during a scale-down event. FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event. public let newGameSessionProtectionPolicy: ProtectionPolicy? /// Policy settings that limit the number of game sessions an individual player can create over a span of time. 
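A sketch of setting the protection policy alongside a creation limit, again reusing the gameLift client from the first sketch; the fleet ID and limit values are hypothetical, and the call assumes the generated updateFleetAttributes method.

_ = try await gameLift.updateFleetAttributes(
    fleetId: "fleet-1234",
    // Protect ACTIVE game sessions from scale-down events.
    newGameSessionProtectionPolicy: .fullProtection,
    // Allow each player to create at most 3 game sessions per 15 minutes.
    resourceCreationLimitPolicy: GameLift.ResourceCreationLimitPolicy(
        newGameSessionsPerCreator: 3,
        policyPeriodInMinutes: 15
    )
)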
public let resourceCreationLimitPolicy: ResourceCreationLimitPolicy? diff --git a/Sources/Soto/Services/Glue/Glue_api.swift b/Sources/Soto/Services/Glue/Glue_api.swift index 1ac60a7196..1087fcde99 100644 --- a/Sources/Soto/Services/Glue/Glue_api.swift +++ b/Sources/Soto/Services/Glue/Glue_api.swift @@ -81,12 +81,48 @@ public struct Glue: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.dualstack]: .init(endpoints: [ + "af-south-1": "glue.af-south-1.api.aws", + "ap-east-1": "glue.ap-east-1.api.aws", + "ap-northeast-1": "glue.ap-northeast-1.api.aws", + "ap-northeast-2": "glue.ap-northeast-2.api.aws", + "ap-northeast-3": "glue.ap-northeast-3.api.aws", + "ap-south-1": "glue.ap-south-1.api.aws", + "ap-south-2": "glue.ap-south-2.api.aws", + "ap-southeast-1": "glue.ap-southeast-1.api.aws", + "ap-southeast-2": "glue.ap-southeast-2.api.aws", + "ap-southeast-3": "glue.ap-southeast-3.api.aws", + "ap-southeast-4": "glue.ap-southeast-4.api.aws", + "ap-southeast-5": "glue.ap-southeast-5.api.aws", + "ca-central-1": "glue.ca-central-1.api.aws", + "ca-west-1": "glue.ca-west-1.api.aws", + "cn-north-1": "glue.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "glue.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "glue.eu-central-1.api.aws", + "eu-central-2": "glue.eu-central-2.api.aws", + "eu-north-1": "glue.eu-north-1.api.aws", + "eu-south-1": "glue.eu-south-1.api.aws", + "eu-south-2": "glue.eu-south-2.api.aws", + "eu-west-1": "glue.eu-west-1.api.aws", + "eu-west-2": "glue.eu-west-2.api.aws", + "eu-west-3": "glue.eu-west-3.api.aws", + "il-central-1": "glue.il-central-1.api.aws", + "me-central-1": "glue.me-central-1.api.aws", + "me-south-1": "glue.me-south-1.api.aws", + "sa-east-1": "glue.sa-east-1.api.aws", + "us-east-1": "glue.us-east-1.api.aws", + "us-east-2": "glue.us-east-2.api.aws", "us-gov-east-1": "glue.us-gov-east-1.api.aws", - "us-gov-west-1": "glue.us-gov-west-1.api.aws" + "us-gov-west-1": "glue.us-gov-west-1.api.aws", + "us-west-1": "glue.us-west-1.api.aws", + "us-west-2": "glue.us-west-2.api.aws" ]), [.dualstack, .fips]: .init(endpoints: [ + "us-east-1": "glue-fips.us-east-1.api.aws", + "us-east-2": "glue-fips.us-east-2.api.aws", "us-gov-east-1": "glue-fips.us-gov-east-1.api.aws", - "us-gov-west-1": "glue-fips.us-gov-west-1.api.aws" + "us-gov-west-1": "glue-fips.us-gov-west-1.api.aws", + "us-west-1": "glue-fips.us-west-1.api.aws", + "us-west-2": "glue-fips.us-west-2.api.aws" ]), [.fips]: .init(endpoints: [ "us-east-1": "glue-fips.us-east-1.amazonaws.com", @@ -1473,7 +1509,7 @@ public struct Glue: AWSService { /// - securityConfiguration: The name of the SecurityConfiguration structure to be used with this job. /// - sourceControlDetails: The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository. /// - tags: The tags to use with this job. You may use tags to limit access to the job. For more information about tags in Glue, see Amazon Web Services Tags in Glue in the developer guide. - /// - timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. 
If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. + /// - timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception. When the value is left blank, the timeout is defaulted to 2880 minutes. Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance, if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days. /// - workerType: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. /// - logger: Logger use during operation @inlinable @@ -1852,7 +1888,7 @@ /// - role: The IAM Role ARN /// - securityConfiguration: The name of the SecurityConfiguration structure to be used with the session /// - tags: The map of key value pairs (tags) belonging to the session. - /// - timeout: The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes), the maximum session lifetime for this job type. Consult the documentation for other job types. 
+ /// - timeout: The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes). Consult the documentation for other job types. /// - workerType: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. /// - logger: Logger use during operation @inlinable @@ -3463,13 +3499,15 @@ public struct Glue: AWSService { /// Retrieves all catalogs defined in a catalog in the Glue Data Catalog. For a Redshift-federated catalog use case, this operation returns the list of catalogs mapped to Redshift databases in the Redshift namespace catalog. /// /// Parameters: + /// - includeRoot: Whether to list the default catalog in the account and region in the response. Defaults to false. When true and ParentCatalogId = NULL | Amazon Web Services Account ID, all catalogs and the default catalog are enumerated in the response. When the ParentCatalogId is not equal to null, and this attribute is passed as false or true, an InvalidInputException is thrown. /// - maxResults: The maximum number of catalogs to return in one response. /// - nextToken: A continuation token, if this is a continuation call. /// - parentCatalogId: The ID of the parent catalog in which the catalog resides. If none is provided, the Amazon Web Services Account Number is used by default. - /// - recursive: When specified as true, iterates through the account and returns all catalog resources (including top-level resources and child resources) + /// - recursive: Whether to list all catalogs across the catalog hierarchy, starting from the ParentCatalogId. Defaults to false. When true, all catalog objects in the ParentCatalogId hierarchy are enumerated in the response. 
/// - logger: Logger use during operation @inlinable public func getCatalogs( + includeRoot: Bool? = nil, maxResults: Int? = nil, nextToken: String? = nil, parentCatalogId: String? = nil, @@ -3477,6 +3515,7 @@ logger: Logger = AWSClient.loggingDisabled ) async throws -> GetCatalogsResponse { let input = GetCatalogsRequest( + includeRoot: includeRoot, maxResults: maxResults, nextToken: nextToken, parentCatalogId: parentCatalogId, @@ -7777,7 +7816,7 @@ /// - notificationProperty: Specifies configuration properties of a job run notification. /// - numberOfWorkers: The number of workers of a defined workerType that are allocated when a job runs. /// - securityConfiguration: The name of the SecurityConfiguration structure to be used with this job run. - /// - timeout: The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. + /// - timeout: The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception. When the value is left blank, the timeout is defaulted to 2880 minutes. Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance, if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days. /// - workerType: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). 
For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. /// - logger: Logger use during operation @inlinable diff --git a/Sources/Soto/Services/Glue/Glue_shapes.swift b/Sources/Soto/Services/Glue/Glue_shapes.swift index 3af22a0d2f..616b7c7978 100644 --- a/Sources/Soto/Services/Glue/Glue_shapes.swift +++ b/Sources/Soto/Services/Glue/Glue_shapes.swift @@ -1059,7 +1059,7 @@ extension Glue { public let notificationProperty: NotificationProperty? /// The name of the SecurityConfiguration structure to be used with this action. public let securityConfiguration: String? - /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job. + /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This overrides the timeout value set in the parent job. Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception. When the value is left blank, the timeout is defaulted to 2880 minutes. Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance, if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? @inlinable @@ -6341,7 +6341,7 @@ extension Glue { public let sourceControlDetails: SourceControlDetails? /// The tags to use with this job. You may use tags to limit access to the job. For more information about tags in Glue, see Amazon Web Services Tags in Glue in the developer guide. public let tags: [String: String]? - /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. + /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception. When the value is left blank, the timeout is defaulted to 2880 minutes. 
Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? @@ -6990,7 +6990,7 @@ extension Glue { public let securityConfiguration: String? /// The map of key value pairs (tags) belonging to the session. public let tags: [String: String]? - /// The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes), the maximum session lifetime for this job type. Consult the documentation for other job types. + /// The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes). Consult the documentation for other job types. public let timeout: Int? /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. 
We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? @@ -11371,17 +11371,20 @@ extension Glue { } public struct GetCatalogsRequest: AWSEncodableShape { + /// Whether to list the default catalog in the account and region in the response. Defaults to false. When true and ParentCatalogId = NULL | Amazon Web Services Account ID, all catalogs and the default catalog are enumerated in the response. When ParentCatalogId is not null and this attribute is passed as either false or true, an InvalidInputException is thrown. + public let includeRoot: Bool? /// The maximum number of catalogs to return in one response. public let maxResults: Int? /// A continuation token, if this is a continuation call. public let nextToken: String? /// The ID of the parent catalog in which the catalog resides. If none is provided, the Amazon Web Services Account Number is used by default. public let parentCatalogId: String? - /// When specified as true, iterates through the account and returns all catalog resources (including top-level resources and child resources) + /// Whether to list all catalogs across the catalog hierarchy, starting from the ParentCatalogId. Defaults to false. When true, all catalog objects in the ParentCatalogId hierarchy are enumerated in the response. public let recursive: Bool? @inlinable - public init(maxResults: Int? = nil, nextToken: String? = nil, parentCatalogId: String? = nil, recursive: Bool? = nil) { + public init(includeRoot: Bool? = nil, maxResults: Int? = nil, nextToken: String? = nil, parentCatalogId: String? = nil, recursive: Bool?
= nil) { + self.includeRoot = includeRoot self.maxResults = maxResults self.nextToken = nextToken self.parentCatalogId = parentCatalogId @@ -11397,6 +11400,7 @@ extension Glue { } private enum CodingKeys: String, CodingKey { + case includeRoot = "IncludeRoot" case maxResults = "MaxResults" case nextToken = "NextToken" case parentCatalogId = "ParentCatalogId" @@ -16224,7 +16228,7 @@ extension Glue { public let securityConfiguration: String? /// The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository. public let sourceControlDetails: SourceControlDetails? - /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. + /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception. When the value is left blank, the timeout is defaulted to 2880 minutes. Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. 
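Returning to the GetCatalogsRequest change above, here is a brief hypothetical sketch of the new includeRoot flag, reusing the glue service object from the earlier sketch (field names follow the generated Soto API). Per the added documentation, includeRoot is only valid while parentCatalogId is nil or the account ID; combining it with any other parent catalog raises InvalidInputException.

    // List top-level catalogs and also surface the account's default (root) catalog.
    let catalogs = try await glue.getCatalogs(
        includeRoot: true,
        maxResults: 50
    )
    for catalog in catalogs.catalogList {
        print(catalog.name)
    }
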
This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? @@ -16483,7 +16487,7 @@ extension Glue { public let startedOn: Date? /// This field holds details that pertain to the state of a job run. The field is nullable. For example, when a job run is in a WAITING state as a result of job run queuing, the field has the reason why the job run is in that state. public let stateDetail: String? - /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. + /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception. When the value is left blank, the timeout is defaulted to 2880 minutes. Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? /// The name of the trigger that started this job run. public let triggerName: String? @@ -16633,7 +16637,7 @@ extension Glue { public let securityConfiguration: String? /// The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository. public let sourceControlDetails: SourceControlDetails? - /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. + /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception. When the value is left blank, the timeout is defaulted to 2880 minutes. Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. 
For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? @@ -23610,7 +23614,7 @@ extension Glue { public let numberOfWorkers: Int? /// The name of the SecurityConfiguration structure to be used with this job run. public let securityConfiguration: String? - /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. + /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception. 
When the value is left blank, the timeout is defaulted to 2880 minutes. Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? diff --git a/Sources/Soto/Services/HealthLake/HealthLake_api.swift b/Sources/Soto/Services/HealthLake/HealthLake_api.swift index bb9f485321..7c04d23924 100644 --- a/Sources/Soto/Services/HealthLake/HealthLake_api.swift +++ b/Sources/Soto/Services/HealthLake/HealthLake_api.swift @@ -431,7 +431,7 @@ public struct HealthLake: AWSService { /// - logger: Logger use during operation @inlinable public func startFHIRExportJob( - clientToken: String = StartFHIRExportJobRequest.idempotencyToken(), + clientToken: String? = StartFHIRExportJobRequest.idempotencyToken(), dataAccessRoleArn: String, datastoreId: String, jobName: String? = nil, @@ -473,7 +473,7 @@ public struct HealthLake: AWSService { /// - logger: Logger use during operation @inlinable public func startFHIRImportJob( - clientToken: String = StartFHIRImportJobRequest.idempotencyToken(), + clientToken: String? 
= StartFHIRImportJobRequest.idempotencyToken(), dataAccessRoleArn: String, datastoreId: String, inputDataConfig: InputDataConfig, diff --git a/Sources/Soto/Services/HealthLake/HealthLake_shapes.swift b/Sources/Soto/Services/HealthLake/HealthLake_shapes.swift index 04a81d3f3f..9fa700389b 100644 --- a/Sources/Soto/Services/HealthLake/HealthLake_shapes.swift +++ b/Sources/Soto/Services/HealthLake/HealthLake_shapes.swift @@ -28,6 +28,7 @@ extension HealthLake { public enum AuthorizationStrategy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case awsAuth = "AWS_AUTH" + case smartOnFhir = "SMART_ON_FHIR" case smartv1 = "SMART_ON_FHIR_V1" public var description: String { return self.rawValue } } @@ -67,6 +68,7 @@ extension HealthLake { case completedWithErrors = "COMPLETED_WITH_ERRORS" case failed = "FAILED" case inProgress = "IN_PROGRESS" + case queued = "QUEUED" case submitted = "SUBMITTED" public var description: String { return self.rawValue } } @@ -900,7 +902,7 @@ extension HealthLake { public struct StartFHIRExportJobRequest: AWSEncodableShape { /// An optional user provided token used for ensuring idempotency. - public let clientToken: String + public let clientToken: String? /// The Amazon Resource Name used during the initiation of the job. public let dataAccessRoleArn: String /// The AWS generated ID for the data store from which files are being exported for an export job. @@ -911,7 +913,7 @@ extension HealthLake { public let outputDataConfig: OutputDataConfig @inlinable - public init(clientToken: String = StartFHIRExportJobRequest.idempotencyToken(), dataAccessRoleArn: String, datastoreId: String, jobName: String? = nil, outputDataConfig: OutputDataConfig) { + public init(clientToken: String? = StartFHIRExportJobRequest.idempotencyToken(), dataAccessRoleArn: String, datastoreId: String, jobName: String? = nil, outputDataConfig: OutputDataConfig) { self.clientToken = clientToken self.dataAccessRoleArn = dataAccessRoleArn self.datastoreId = datastoreId @@ -968,7 +970,7 @@ extension HealthLake { public struct StartFHIRImportJobRequest: AWSEncodableShape { /// Optional user provided token used for ensuring idempotency. - public let clientToken: String + public let clientToken: String? /// The Amazon Resource Name (ARN) that gives AWS HealthLake access permission. public let dataAccessRoleArn: String /// The AWS-generated data store ID. @@ -980,7 +982,7 @@ extension HealthLake { public let jobOutputDataConfig: OutputDataConfig @inlinable - public init(clientToken: String = StartFHIRImportJobRequest.idempotencyToken(), dataAccessRoleArn: String, datastoreId: String, inputDataConfig: InputDataConfig, jobName: String? = nil, jobOutputDataConfig: OutputDataConfig) { + public init(clientToken: String? = StartFHIRImportJobRequest.idempotencyToken(), dataAccessRoleArn: String, datastoreId: String, inputDataConfig: InputDataConfig, jobName: String? 
= nil, jobOutputDataConfig: OutputDataConfig) { self.clientToken = clientToken self.dataAccessRoleArn = dataAccessRoleArn self.datastoreId = datastoreId diff --git a/Sources/Soto/Services/Imagebuilder/Imagebuilder_api.swift b/Sources/Soto/Services/Imagebuilder/Imagebuilder_api.swift index 3afc670396..30bbf28b01 100644 --- a/Sources/Soto/Services/Imagebuilder/Imagebuilder_api.swift +++ b/Sources/Soto/Services/Imagebuilder/Imagebuilder_api.swift @@ -1552,6 +1552,64 @@ public struct Imagebuilder: AWSService { return try await self.importComponent(input, logger: logger) } + /// Import a Windows operating system image from a verified Microsoft ISO disk + /// file. The following disk images are supported: Windows 11 Enterprise + @Sendable + @inlinable + public func importDiskImage(_ input: ImportDiskImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ImportDiskImageResponse { + try await self.client.execute( + operation: "ImportDiskImage", + path: "/ImportDiskImage", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Import a Windows operating system image from a verified Microsoft ISO disk + /// file. The following disk images are supported: Windows 11 Enterprise + /// + /// Parameters: + /// - clientToken: Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. + /// - description: The description for your disk image import. + /// - executionRole: The name or Amazon Resource Name (ARN) for the IAM role you create that grants Image Builder access + /// - infrastructureConfigurationArn: The Amazon Resource Name (ARN) of the infrastructure configuration resource that's used for + /// - name: The name of the image resource that's created from the import. + /// - osVersion: The operating system version for the imported image. Allowed values include + /// - platform: The operating system platform for the imported image. Allowed values include + /// - semanticVersion: The semantic version to attach to the image that's created during the import + /// - tags: Tags that are attached to image resources created from the import. + /// - uri: The uri of the ISO disk file that's stored in Amazon S3. + /// - logger: Logger use during operation + @inlinable + public func importDiskImage( + clientToken: String = ImportDiskImageRequest.idempotencyToken(), + description: String? = nil, + executionRole: String? = nil, + infrastructureConfigurationArn: String, + name: String, + osVersion: String, + platform: String, + semanticVersion: String, + tags: [String: String]? = nil, + uri: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ImportDiskImageResponse { + let input = ImportDiskImageRequest( + clientToken: clientToken, + description: description, + executionRole: executionRole, + infrastructureConfigurationArn: infrastructureConfigurationArn, + name: name, + osVersion: osVersion, + platform: platform, + semanticVersion: semanticVersion, + tags: tags, + uri: uri + ) + return try await self.importDiskImage(input, logger: logger) + } + /// When you export your virtual machine (VM) from its virtualization environment, that /// process creates a set of one or more disk container files that act as snapshots of your /// VM’s environment, settings, and data. 
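Because the new ImportDiskImage operation is easy to misread as a variant of the VM import described next, a short hypothetical sketch of the ISO path may help (the ARN, resource names, and S3 URI are placeholders; clientToken is omitted so the generated idempotency token is used):

    import SotoImagebuilder

    let imagebuilder = Imagebuilder(client: client, region: .useast1)
    let result = try await imagebuilder.importDiskImage(
        infrastructureConfigurationArn: "arn:aws:imagebuilder:us-east-1:111122223333:infrastructure-configuration/iso-import-infra",
        name: "win11-enterprise-base",
        osVersion: "Microsoft Windows 11",   // the only documented value today
        platform: "Windows",
        semanticVersion: "1.0.0",
        uri: "s3://my-bucket/iso/win11-enterprise.iso"
    )
    print(result.imageBuildVersionArn ?? "import not started")
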
The Amazon EC2 API ImportImage diff --git a/Sources/Soto/Services/Imagebuilder/Imagebuilder_shapes.swift b/Sources/Soto/Services/Imagebuilder/Imagebuilder_shapes.swift index fc83d7ab2b..7ad1e2db87 100644 --- a/Sources/Soto/Services/Imagebuilder/Imagebuilder_shapes.swift +++ b/Sources/Soto/Services/Imagebuilder/Imagebuilder_shapes.swift @@ -28,6 +28,7 @@ extension Imagebuilder { public enum BuildType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case `import` = "IMPORT" + case importIso = "IMPORT_ISO" case scheduled = "SCHEDULED" case userInitiated = "USER_INITIATED" public var description: String { return self.rawValue } @@ -3668,7 +3669,8 @@ extension Imagebuilder { /// following ways: USER_INITIATED – A manual /// pipeline build request. SCHEDULED – A pipeline build /// initiated by a cron expression in the Image Builder pipeline, or from EventBridge. IMPORT – A VM import created - /// the image to use as the base image for the recipe. + /// the image to use as the base image for the recipe. IMPORT_ISO – An ISO disk import created + /// the image. public let buildType: BuildType? /// For container images, this is the container recipe that Image Builder used to create the /// image. For images that distribute an AMI, this is empty. @@ -4248,7 +4250,8 @@ extension Imagebuilder { /// following ways: USER_INITIATED – A manual /// pipeline build request. SCHEDULED – A pipeline build /// initiated by a cron expression in the Image Builder pipeline, or from EventBridge. IMPORT – A VM import created - /// the image to use as the base image for the recipe. + /// the image to use as the base image for the recipe. IMPORT_ISO – An ISO disk import created + /// the image. public let buildType: BuildType? /// The date on which Image Builder created this image. public let dateCreated: String? @@ -4320,7 +4323,7 @@ extension Imagebuilder { /// Determines if tests should run after building the image. Image Builder defaults to enable tests /// to run following the image build, before image distribution. public let imageTestsEnabled: Bool? - /// The maximum time in minutes that tests are permitted to run. The timeout attribute is not currently active. This value is + /// The maximum time in minutes that tests are permitted to run. The timeout property is not currently active. This value is /// ignored. public let timeoutMinutes: Int? @@ -4350,7 +4353,8 @@ extension Imagebuilder { /// following ways: USER_INITIATED – A manual /// pipeline build request. SCHEDULED – A pipeline build /// initiated by a cron expression in the Image Builder pipeline, or from EventBridge. IMPORT – A VM import created - /// the image to use as the base image for the recipe. + /// the image to use as the base image for the recipe. IMPORT_ISO – An ISO disk import created + /// the image. public let buildType: BuildType? /// The date on which this specific version of the Image Builder image was created. public let dateCreated: String? @@ -4523,6 +4527,104 @@ extension Imagebuilder { } } + public struct ImportDiskImageRequest: AWSEncodableShape { + /// Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. + public let clientToken: String + /// The description for your disk image import. + public let description: String? 
+ /// The name or Amazon Resource Name (ARN) for the IAM role you create that grants Image Builder access + /// to perform workflow actions to import an image from a Microsoft ISO file. + public let executionRole: String? + /// The Amazon Resource Name (ARN) of the infrastructure configuration resource that's used for + /// launching the EC2 instance on which the ISO image is built. + public let infrastructureConfigurationArn: String + /// The name of the image resource that's created from the import. + public let name: String + /// The operating system version for the imported image. Allowed values include + /// the following: Microsoft Windows 11. + public let osVersion: String + /// The operating system platform for the imported image. Allowed values include + /// the following: Windows. + public let platform: String + /// The semantic version to attach to the image that's created during the import + /// process. This version follows the semantic version syntax. + public let semanticVersion: String + /// Tags that are attached to image resources created from the import. + public let tags: [String: String]? + /// The uri of the ISO disk file that's stored in Amazon S3. + public let uri: String + + @inlinable + public init(clientToken: String = ImportDiskImageRequest.idempotencyToken(), description: String? = nil, executionRole: String? = nil, infrastructureConfigurationArn: String, name: String, osVersion: String, platform: String, semanticVersion: String, tags: [String: String]? = nil, uri: String) { + self.clientToken = clientToken + self.description = description + self.executionRole = executionRole + self.infrastructureConfigurationArn = infrastructureConfigurationArn + self.name = name + self.osVersion = osVersion + self.platform = platform + self.semanticVersion = semanticVersion + self.tags = tags + self.uri = uri + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 36) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.description, name: "description", parent: name, max: 1024) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.executionRole, name: "executionRole", parent: name, max: 2048) + try self.validate(self.executionRole, name: "executionRole", parent: name, min: 1) + try self.validate(self.executionRole, name: "executionRole", parent: name, pattern: "^(?:arn:aws(?:-[a-z]+)*:iam::[0-9]{12}:role/)?[a-zA-Z_0-9+=,.@\\-_/]+$") + try self.validate(self.infrastructureConfigurationArn, name: "infrastructureConfigurationArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):infrastructure-configuration/[a-z0-9-_]+$") + try self.validate(self.name, name: "name", parent: name, max: 1024) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.osVersion, name: "osVersion", parent: name, min: 1) + try self.validate(self.platform, name: "platform", parent: name, max: 1024) + try self.validate(self.platform, name: "platform", parent: name, min: 1) + try self.validate(self.semanticVersion, name: "semanticVersion", parent: name, pattern: "^[0-9]+\\.[0-9]+\\.[0-9]+$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.value, name: 
"tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 50) + try self.validate(self.tags, name: "tags", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case description = "description" + case executionRole = "executionRole" + case infrastructureConfigurationArn = "infrastructureConfigurationArn" + case name = "name" + case osVersion = "osVersion" + case platform = "platform" + case semanticVersion = "semanticVersion" + case tags = "tags" + case uri = "uri" + } + } + + public struct ImportDiskImageResponse: AWSDecodableShape { + /// The client token that uniquely identifies the request. + public let clientToken: String? + /// The Amazon Resource Name (ARN) of the output AMI that was created from the ISO disk file. + public let imageBuildVersionArn: String? + + @inlinable + public init(clientToken: String? = nil, imageBuildVersionArn: String? = nil) { + self.clientToken = clientToken + self.imageBuildVersionArn = imageBuildVersionArn + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case imageBuildVersionArn = "imageBuildVersionArn" + } + } + public struct ImportVmImageRequest: AWSEncodableShape { /// Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference. public let clientToken: String @@ -7313,14 +7415,13 @@ extension Imagebuilder { } public struct Schedule: AWSEncodableShape & AWSDecodableShape { - /// The condition configures when the pipeline should trigger a new image build. When the - /// pipelineExecutionStartCondition is set to - /// EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, and you use semantic - /// version filters on the base image or components in your image recipe, EC2 Image Builder will - /// build a new image only when there are new versions of the image or components in your - /// recipe that match the semantic version filter. When it is set to - /// EXPRESSION_MATCH_ONLY, it will build a new image every time the CRON - /// expression matches the current time. For semantic version syntax, see CreateComponent in the EC2 Image Builder API Reference. + /// The start condition configures when the pipeline should trigger a new image build, + /// as follows. If no value is set Image Builder defaults to + /// EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE. EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE (default) – + /// When you use semantic version filters on the base image or components in your + /// image recipe, EC2 Image Builder builds a new image only when there are new versions of + /// the base image or components in your recipe that match the filter. For semantic version syntax, see CreateComponent. EXPRESSION_MATCH_ONLY – This condition builds a new + /// image every time the CRON expression matches the current time. public let pipelineExecutionStartCondition: PipelineExecutionStartCondition? /// The cron expression determines how often EC2 Image Builder evaluates your /// pipelineExecutionStartCondition. 
For information on how to format a cron expression in Image Builder, see Use diff --git a/Sources/Soto/Services/InternetMonitor/InternetMonitor_api.swift b/Sources/Soto/Services/InternetMonitor/InternetMonitor_api.swift index 2e5990b513..b751738e48 100644 --- a/Sources/Soto/Services/InternetMonitor/InternetMonitor_api.swift +++ b/Sources/Soto/Services/InternetMonitor/InternetMonitor_api.swift @@ -102,6 +102,7 @@ public struct InternetMonitor: AWSService { "ap-southeast-3": "internetmonitor.ap-southeast-3.api.aws", "ap-southeast-4": "internetmonitor.ap-southeast-4.api.aws", "ap-southeast-5": "internetmonitor.ap-southeast-5.api.aws", + "ap-southeast-7": "internetmonitor.ap-southeast-7.api.aws", "ca-central-1": "internetmonitor.ca-central-1.api.aws", "ca-west-1": "internetmonitor.ca-west-1.api.aws", "cn-north-1": "internetmonitor.cn-north-1.api.amazonwebservices.com.cn", @@ -117,6 +118,7 @@ public struct InternetMonitor: AWSService { "il-central-1": "internetmonitor.il-central-1.api.aws", "me-central-1": "internetmonitor.me-central-1.api.aws", "me-south-1": "internetmonitor.me-south-1.api.aws", + "mx-central-1": "internetmonitor.mx-central-1.api.aws", "sa-east-1": "internetmonitor.sa-east-1.api.aws", "us-east-1": "internetmonitor.us-east-1.api.aws", "us-east-2": "internetmonitor.us-east-2.api.aws", @@ -167,11 +169,13 @@ public struct InternetMonitor: AWSService { ]), [.fips]: .init(endpoints: [ "ap-southeast-5": "internetmonitor-fips.ap-southeast-5.api.aws", + "ap-southeast-7": "internetmonitor-fips.ap-southeast-7.api.aws", "ca-central-1": "internetmonitor-fips.ca-central-1.amazonaws.com", "ca-west-1": "internetmonitor-fips.ca-west-1.api.aws", "cn-north-1": "internetmonitor-fips.cn-north-1.api.amazonwebservices.com.cn", "cn-northwest-1": "internetmonitor-fips.cn-northwest-1.api.amazonwebservices.com.cn", "il-central-1": "internetmonitor-fips.il-central-1.api.aws", + "mx-central-1": "internetmonitor-fips.mx-central-1.api.aws", "us-east-1": "internetmonitor-fips.us-east-1.amazonaws.com", "us-east-2": "internetmonitor-fips.us-east-2.amazonaws.com", "us-gov-east-1": "internetmonitor-fips.us-gov-east-1.api.aws", diff --git a/Sources/Soto/Services/IoT/IoT_api.swift b/Sources/Soto/Services/IoT/IoT_api.swift index 8739a2b6ef..5566434ab5 100644 --- a/Sources/Soto/Services/IoT/IoT_api.swift +++ b/Sources/Soto/Services/IoT/IoT_api.swift @@ -890,7 +890,7 @@ public struct IoT: AWSService { /// - mandatoryParameters: A list of parameters that are required by the StartCommandExecution API. These parameters need to be specified only when using the AWS-IoT-FleetWise namespace. You can either specify them here or when running the command using the StartCommandExecution API. /// - namespace: The namespace of the command. The MQTT reserved topics and validations will be used for command executions according to the namespace setting. /// - payload: The payload object for the command. You must specify this information when using the AWS-IoT namespace. You can upload a static payload file from your local storage that contains the instructions for the device to process. The payload file can use any format. To make sure that the device correctly interprets the payload, we recommend you to specify the payload content type. - /// - roleArn: The IAM role that allows access to create the command. + /// - roleArn: The IAM role that you must provide when using the AWS-IoT-FleetWise namespace. The role grants IoT Device Management the permission to access IoT FleetWise resources for generating the payload for the command. 
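A hypothetical sketch of that requirement follows; the commandId parameter, the commandArn response field, and the enum case name are assumptions, as only the parameters listed in this hunk are confirmed by the diff:

    import SotoIoT

    let iot = IoT(client: client, region: .useast1)
    // For the AWS-IoT-FleetWise namespace the role ARN is mandatory so that
    // IoT Device Management can read FleetWise resources to build the payload.
    let command = try await iot.createCommand(
        commandId: "stop-engine",   // assumed required identifier
        namespace: .awsIotFleetWise,
        roleArn: "arn:aws:iam::111122223333:role/FleetWisePayloadAccess"
    )
    print(command.commandArn ?? "no ARN returned")
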
This field is not required when you use the AWS-IoT namespace. /// - tags: Name-value pairs that are used as metadata to manage a command. /// - logger: Logger use during operation @inlinable @@ -5024,6 +5024,35 @@ public struct IoT: AWSService { return try await self.getStatistics(input, logger: logger) } + /// Retrieves the live connectivity status per device. + @Sendable + @inlinable + public func getThingConnectivityData(_ input: GetThingConnectivityDataRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetThingConnectivityDataResponse { + try await self.client.execute( + operation: "GetThingConnectivityData", + path: "/things/{thingName}/connectivity-data", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves the live connectivity status per device. + /// + /// Parameters: + /// - thingName: The name of your IoT thing. + /// - logger: Logger use during operation + @inlinable + public func getThingConnectivityData( + thingName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetThingConnectivityDataResponse { + let input = GetThingConnectivityDataRequest( + thingName: thingName + ) + return try await self.getThingConnectivityData(input, logger: logger) + } + /// Gets information about the rule. Requires permission to access the GetTopicRule action. @Sendable @inlinable @@ -5632,7 +5661,7 @@ public struct IoT: AWSService { return try await self.listCertificatesByCA(input, logger: logger) } - /// List all command executions. You must provide only the startedTimeFilter or the completedTimeFilter information. If you provide both time filters, the API will generate an error. You can use this information to find command executions that started within a specific timeframe. + /// List all command executions. You must provide only the startedTimeFilter or the completedTimeFilter information. If you provide both time filters, the API will generate an error. You can use this information to retrieve a list of command executions within a specific timeframe. You must provide only the commandArn or the thingArn information depending on whether you want to list executions for a specific command or an IoT thing. If you provide both fields, the API will generate an error. For more information about considerations for using this API, see List command executions in your account (CLI). @Sendable @inlinable public func listCommandExecutions(_ input: ListCommandExecutionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCommandExecutionsResponse { @@ -5645,7 +5674,7 @@ public struct IoT: AWSService { logger: logger ) } - /// List all command executions. You must provide only the startedTimeFilter or the completedTimeFilter information. If you provide both time filters, the API will generate an error. You can use this information to find command executions that started within a specific timeframe. + /// List all command executions. You must provide only the startedTimeFilter or the completedTimeFilter information. If you provide both time filters, the API will generate an error. You can use this information to retrieve a list of command executions within a specific timeframe. You must provide only the commandArn or the thingArn information depending on whether you want to list executions for a specific command or an IoT thing. If you provide both fields, the API will generate an error. 
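And a minimal sketch of the new connectivity lookup added above, reusing the iot service object from the previous sketch (the thing name is a placeholder; the response fields appear later in this diff):

    let data = try await iot.getThingConnectivityData(thingName: "my-sensor-01")
    if data.connected == true {
        print("\(data.thingName ?? "thing") is connected")
    } else {
        print("disconnected, reason: \(data.disconnectReason?.rawValue ?? "UNKNOWN")")
    }
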
For more information about considerations for using this API, see List command executions in your account (CLI). /// /// Parameters: /// - commandArn: The Amazon Resource Number (ARN) of the command. You can use this information to list all command executions for a particular command. diff --git a/Sources/Soto/Services/IoT/IoT_shapes.swift b/Sources/Soto/Services/IoT/IoT_shapes.swift index bb321b7b20..6db355bce8 100644 --- a/Sources/Soto/Services/IoT/IoT_shapes.swift +++ b/Sources/Soto/Services/IoT/IoT_shapes.swift @@ -308,6 +308,24 @@ extension IoT { public var description: String { return self.rawValue } } + public enum DisconnectReasonValue: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case authError = "AUTH_ERROR" + case clientError = "CLIENT_ERROR" + case clientInitiatedDisconnect = "CLIENT_INITIATED_DISCONNECT" + case connectionLost = "CONNECTION_LOST" + case customauthTtlExpiration = "CUSTOMAUTH_TTL_EXPIRATION" + case duplicateClientid = "DUPLICATE_CLIENTID" + case forbiddenAccess = "FORBIDDEN_ACCESS" + case mqttKeepAliveTimeout = "MQTT_KEEP_ALIVE_TIMEOUT" + case none = "NONE" + case serverError = "SERVER_ERROR" + case serverInitiatedDisconnect = "SERVER_INITIATED_DISCONNECT" + case throttled = "THROTTLED" + case unknown = "UNKNOWN" + case websocketTtlExpiration = "WEBSOCKET_TTL_EXPIRATION" + public var description: String { return self.rawValue } + } + public enum DomainConfigurationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" @@ -3457,7 +3475,7 @@ extension IoT { public let namespace: CommandNamespace? /// The payload object for the command. You must specify this information when using the AWS-IoT namespace. You can upload a static payload file from your local storage that contains the instructions for the device to process. The payload file can use any format. To make sure that the device correctly interprets the payload, we recommend you to specify the payload content type. public let payload: CommandPayload? - /// The IAM role that allows access to create the command. + /// The IAM role that you must provide when using the AWS-IoT-FleetWise namespace. The role grants IoT Device Management the permission to access IoT FleetWise resources for generating the payload for the command. This field is not required when you use the AWS-IoT namespace. public let roleArn: String? /// Name-value pairs that are used as metadata to manage a command. public let tags: [Tag]? @@ -9593,7 +9611,7 @@ extension IoT { public let statusReason: StatusReason? /// The Amazon Resource Number (ARN) of the device on which the command execution is being performed. public let targetArn: String? - /// The time to live (TTL) parameter for the GetCommandExecution API. + /// The time to live (TTL) parameter that indicates the duration for which executions will be retained in your account. The default value is six months. public let timeToLive: Date? @inlinable @@ -9677,7 +9695,7 @@ extension IoT { public let payload: CommandPayload? /// Indicates whether the command is being deleted. public let pendingDeletion: Bool? - /// The IAM role that allows access to retrieve information about the command. + /// The IAM role that you provided when creating the command with AWS-IoT-FleetWise as the namespace. public let roleArn: String? @inlinable @@ -10309,6 +10327,56 @@ extension IoT { } } + public struct GetThingConnectivityDataRequest: AWSEncodableShape { + /// The name of your IoT thing. 
+ public let thingName: String + + @inlinable + public init(thingName: String) { + self.thingName = thingName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.thingName, key: "thingName") + } + + public func validate(name: String) throws { + try self.validate(self.thingName, name: "thingName", parent: name, max: 128) + try self.validate(self.thingName, name: "thingName", parent: name, min: 1) + try self.validate(self.thingName, name: "thingName", parent: name, pattern: "^[a-zA-Z0-9:_-]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetThingConnectivityDataResponse: AWSDecodableShape { + /// A Boolean that indicates the connectivity status. + public let connected: Bool? + /// The reason why the client is disconnecting. + public let disconnectReason: DisconnectReasonValue? + /// The name of your IoT thing. + public let thingName: String? + /// The timestamp of when the event occurred. + public let timestamp: Date? + + @inlinable + public init(connected: Bool? = nil, disconnectReason: DisconnectReasonValue? = nil, thingName: String? = nil, timestamp: Date? = nil) { + self.connected = connected + self.disconnectReason = disconnectReason + self.thingName = thingName + self.timestamp = timestamp + } + + private enum CodingKeys: String, CodingKey { + case connected = "connected" + case disconnectReason = "disconnectReason" + case thingName = "thingName" + case timestamp = "timestamp" + } + } + public struct GetTopicRuleDestinationRequest: AWSEncodableShape { /// The ARN of the topic rule destination. public let arn: String diff --git a/Sources/Soto/Services/IoT1ClickDevicesService/IoT1ClickDevicesService_api.swift b/Sources/Soto/Services/IoT1ClickDevicesService/IoT1ClickDevicesService_api.swift deleted file mode 100644 index d7aead0cee..0000000000 --- a/Sources/Soto/Services/IoT1ClickDevicesService/IoT1ClickDevicesService_api.swift +++ /dev/null @@ -1,504 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// This source file is part of the Soto for AWS open source project -// -// Copyright (c) 2017-2024 the Soto project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of Soto project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. -// DO NOT EDIT. - -#if os(Linux) && compiler(<5.10) -// swift-corelibs-foundation hasn't been updated with Sendable conformances -@preconcurrency import Foundation -#else -import Foundation -#endif -@_exported import SotoCore - -/// Service object for interacting with AWS IoT1ClickDevicesService service. -/// -/// Describes all of the AWS IoT 1-Click device-related API operations for the service. Also provides sample requests, responses, and errors for the supported web services protocols. 
-public struct IoT1ClickDevicesService: AWSService {
-    // MARK: Member variables
-
-    /// Client used for communication with AWS
-    public let client: AWSClient
-    /// Service configuration
-    public let config: AWSServiceConfig
-
-    // MARK: Initialization
-
-    /// Initialize the IoT1ClickDevicesService client
-    /// - parameters:
-    ///     - client: AWSClient used to process requests
-    ///     - region: Region of server you want to communicate with. This will override the partition parameter.
-    ///     - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov).
-    ///     - endpoint: Custom endpoint URL to use instead of standard AWS servers
-    ///     - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded
-    ///     - timeout: Timeout value for HTTP requests
-    ///     - byteBufferAllocator: Allocator for ByteBuffers
-    ///     - options: Service options
-    public init(
-        client: AWSClient,
-        region: SotoCore.Region? = nil,
-        partition: AWSPartition = .aws,
-        endpoint: String? = nil,
-        middleware: AWSMiddlewareProtocol? = nil,
-        timeout: TimeAmount? = nil,
-        byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(),
-        options: AWSServiceConfig.Options = []
-    ) {
-        self.client = client
-        self.config = AWSServiceConfig(
-            region: region,
-            partition: region?.partition ?? partition,
-            serviceName: "IoT1ClickDevicesService",
-            serviceIdentifier: "devices.iot1click",
-            signingName: "iot1click",
-            serviceProtocol: .restjson,
-            apiVersion: "2018-05-14",
-            endpoint: endpoint,
-            errorType: IoT1ClickDevicesServiceErrorType.self,
-            middleware: middleware,
-            timeout: timeout,
-            byteBufferAllocator: byteBufferAllocator,
-            options: options
-        )
-    }
-
-
-
-
-
-    // MARK: API Calls
-
-    /// Adds device(s) to your account (i.e., claim one or more devices) if and only if you received a claim code with the device(s).
-    @Sendable
-    @inlinable
-    public func claimDevicesByClaimCode(_ input: ClaimDevicesByClaimCodeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ClaimDevicesByClaimCodeResponse {
-        try await self.client.execute(
-            operation: "ClaimDevicesByClaimCode",
-            path: "/claims/{ClaimCode}",
-            httpMethod: .PUT,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Adds device(s) to your account (i.e., claim one or more devices) if and only if you received a claim code with the device(s).
-    ///
-    /// Parameters:
-    ///   - claimCode: The claim code, starting with "C-", as provided by the device manufacturer.
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func claimDevicesByClaimCode(
-        claimCode: String,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws -> ClaimDevicesByClaimCodeResponse {
-        let input = ClaimDevicesByClaimCodeRequest(
-            claimCode: claimCode
-        )
-        return try await self.claimDevicesByClaimCode(input, logger: logger)
-    }
-
-    /// Given a device ID, returns a DescribeDeviceResponse object describing the details of the device.
-    @Sendable
-    @inlinable
-    public func describeDevice(_ input: DescribeDeviceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeDeviceResponse {
-        try await self.client.execute(
-            operation: "DescribeDevice",
-            path: "/devices/{DeviceId}",
-            httpMethod: .GET,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Given a device ID, returns a DescribeDeviceResponse object describing the details of the device.
-    ///
-    /// Parameters:
-    ///   - deviceId: The unique identifier of the device.
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func describeDevice(
-        deviceId: String,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws -> DescribeDeviceResponse {
-        let input = DescribeDeviceRequest(
-            deviceId: deviceId
-        )
-        return try await self.describeDevice(input, logger: logger)
-    }
-
-    /// Given a device ID, finalizes the claim request for the associated device. Claiming a device consists of initiating a claim, then publishing a device event, and finalizing the claim. For a device of type button, a device event can be published by simply clicking the device.
-    @Sendable
-    @inlinable
-    public func finalizeDeviceClaim(_ input: FinalizeDeviceClaimRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> FinalizeDeviceClaimResponse {
-        try await self.client.execute(
-            operation: "FinalizeDeviceClaim",
-            path: "/devices/{DeviceId}/finalize-claim",
-            httpMethod: .PUT,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Given a device ID, finalizes the claim request for the associated device. Claiming a device consists of initiating a claim, then publishing a device event, and finalizing the claim. For a device of type button, a device event can be published by simply clicking the device.
-    ///
-    /// Parameters:
-    ///   - deviceId: The unique identifier of the device.
-    ///   - tags: A collection of key/value pairs defining the resource tags. For example, { "tags": {"key1": "value1", "key2": "value2"} }. For more information, see AWS Tagging Strategies.
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func finalizeDeviceClaim(
-        deviceId: String,
-        tags: [String: String]? = nil,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws -> FinalizeDeviceClaimResponse {
-        let input = FinalizeDeviceClaimRequest(
-            deviceId: deviceId,
-            tags: tags
-        )
-        return try await self.finalizeDeviceClaim(input, logger: logger)
-    }
-
-    /// Given a device ID, returns the invokable methods associated with the device.
-    @Sendable
-    @inlinable
-    public func getDeviceMethods(_ input: GetDeviceMethodsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDeviceMethodsResponse {
-        try await self.client.execute(
-            operation: "GetDeviceMethods",
-            path: "/devices/{DeviceId}/methods",
-            httpMethod: .GET,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Given a device ID, returns the invokable methods associated with the device.
-    ///
-    /// Parameters:
-    ///   - deviceId: The unique identifier of the device.
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func getDeviceMethods(
-        deviceId: String,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws -> GetDeviceMethodsResponse {
-        let input = GetDeviceMethodsRequest(
-            deviceId: deviceId
-        )
-        return try await self.getDeviceMethods(input, logger: logger)
-    }
-
-    /// Given a device ID, initiates a claim request for the associated device. Claiming a device consists of initiating a claim, then publishing a device event, and finalizing the claim. For a device of type button, a device event can be published by simply clicking the device.
-    @Sendable
-    @inlinable
-    public func initiateDeviceClaim(_ input: InitiateDeviceClaimRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InitiateDeviceClaimResponse {
-        try await self.client.execute(
-            operation: "InitiateDeviceClaim",
-            path: "/devices/{DeviceId}/initiate-claim",
-            httpMethod: .PUT,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Given a device ID, initiates a claim request for the associated device. Claiming a device consists of initiating a claim, then publishing a device event, and finalizing the claim. For a device of type button, a device event can be published by simply clicking the device.
-    ///
-    /// Parameters:
-    ///   - deviceId: The unique identifier of the device.
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func initiateDeviceClaim(
-        deviceId: String,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws -> InitiateDeviceClaimResponse {
-        let input = InitiateDeviceClaimRequest(
-            deviceId: deviceId
-        )
-        return try await self.initiateDeviceClaim(input, logger: logger)
-    }
-
-    /// Given a device ID, issues a request to invoke a named device method (with possible parameters). See the "Example POST" code snippet below.
-    @Sendable
-    @inlinable
-    public func invokeDeviceMethod(_ input: InvokeDeviceMethodRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InvokeDeviceMethodResponse {
-        try await self.client.execute(
-            operation: "InvokeDeviceMethod",
-            path: "/devices/{DeviceId}/methods",
-            httpMethod: .POST,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Given a device ID, issues a request to invoke a named device method (with possible parameters). See the "Example POST" code snippet below.
-    ///
-    /// Parameters:
-    ///   - deviceId: The unique identifier of the device.
-    ///   - deviceMethod: The device method to invoke.
-    ///   - deviceMethodParameters: A JSON encoded string containing the device method request parameters.
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func invokeDeviceMethod(
-        deviceId: String,
-        deviceMethod: DeviceMethod? = nil,
-        deviceMethodParameters: String? = nil,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws -> InvokeDeviceMethodResponse {
-        let input = InvokeDeviceMethodRequest(
-            deviceId: deviceId,
-            deviceMethod: deviceMethod,
-            deviceMethodParameters: deviceMethodParameters
-        )
-        return try await self.invokeDeviceMethod(input, logger: logger)
-    }
-
-    /// Using a device ID, returns a DeviceEventsResponse object containing an array of events for the device.
-    @Sendable
-    @inlinable
-    public func listDeviceEvents(_ input: ListDeviceEventsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDeviceEventsResponse {
-        try await self.client.execute(
-            operation: "ListDeviceEvents",
-            path: "/devices/{DeviceId}/events",
-            httpMethod: .GET,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Using a device ID, returns a DeviceEventsResponse object containing an array of events for the device.
-    ///
-    /// Parameters:
-    ///   - deviceId: The unique identifier of the device.
-    ///   - fromTimeStamp: The start date for the device event query, in ISO8061 format. For example, 2018-03-28T15:45:12.880Z
-    ///   - maxResults: The maximum number of results to return per request. If not set, a default value of 100 is used.
-    ///   - nextToken: The token to retrieve the next set of results.
-    ///   - toTimeStamp: The end date for the device event query, in ISO8061 format. For example, 2018-03-28T15:45:12.880Z
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func listDeviceEvents(
-        deviceId: String,
-        fromTimeStamp: Date? = nil,
-        maxResults: Int? = nil,
-        nextToken: String? = nil,
-        toTimeStamp: Date? = nil,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws -> ListDeviceEventsResponse {
-        let input = ListDeviceEventsRequest(
-            deviceId: deviceId,
-            fromTimeStamp: fromTimeStamp,
-            maxResults: maxResults,
-            nextToken: nextToken,
-            toTimeStamp: toTimeStamp
-        )
-        return try await self.listDeviceEvents(input, logger: logger)
-    }
-
-    /// Lists the 1-Click compatible devices associated with your AWS account.
-    @Sendable
-    @inlinable
-    public func listDevices(_ input: ListDevicesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDevicesResponse {
-        try await self.client.execute(
-            operation: "ListDevices",
-            path: "/devices",
-            httpMethod: .GET,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Lists the 1-Click compatible devices associated with your AWS account.
-    ///
-    /// Parameters:
-    ///   - deviceType: The type of the device, such as "button".
-    ///   - maxResults: The maximum number of results to return per request. If not set, a default value of 100 is used.
-    ///   - nextToken: The token to retrieve the next set of results.
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func listDevices(
-        deviceType: String? = nil,
-        maxResults: Int? = nil,
-        nextToken: String? = nil,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws -> ListDevicesResponse {
-        let input = ListDevicesRequest(
-            deviceType: deviceType,
-            maxResults: maxResults,
-            nextToken: nextToken
-        )
-        return try await self.listDevices(input, logger: logger)
-    }
-
-    /// Lists the tags associated with the specified resource ARN.
-    @Sendable
-    @inlinable
-    public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse {
-        try await self.client.execute(
-            operation: "ListTagsForResource",
-            path: "/tags/{ResourceArn}",
-            httpMethod: .GET,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Lists the tags associated with the specified resource ARN.
-    ///
-    /// Parameters:
-    ///   - resourceArn: The ARN of the resource.
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func listTagsForResource(
-        resourceArn: String,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws -> ListTagsForResourceResponse {
-        let input = ListTagsForResourceRequest(
-            resourceArn: resourceArn
-        )
-        return try await self.listTagsForResource(input, logger: logger)
-    }
-
-    /// Adds or updates the tags associated with the resource ARN. See AWS IoT 1-Click Service Limits for the maximum number of tags allowed per resource.
-    @Sendable
-    @inlinable
-    public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
-        try await self.client.execute(
-            operation: "TagResource",
-            path: "/tags/{ResourceArn}",
-            httpMethod: .POST,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Adds or updates the tags associated with the resource ARN. See AWS IoT 1-Click Service Limits for the maximum number of tags allowed per resource.
-    ///
-    /// Parameters:
-    ///   - resourceArn: The ARN of the resource.
-    ///   - tags: A collection of key/value pairs defining the resource tags. For example, { "tags": {"key1": "value1", "key2": "value2"} }. For more information, see AWS Tagging Strategies.
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func tagResource(
-        resourceArn: String,
-        tags: [String: String]? = nil,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws {
-        let input = TagResourceRequest(
-            resourceArn: resourceArn,
-            tags: tags
-        )
-        return try await self.tagResource(input, logger: logger)
-    }
-
-    /// Disassociates a device from your AWS account using its device ID.
-    @Sendable
-    @inlinable
-    public func unclaimDevice(_ input: UnclaimDeviceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UnclaimDeviceResponse {
-        try await self.client.execute(
-            operation: "UnclaimDevice",
-            path: "/devices/{DeviceId}/unclaim",
-            httpMethod: .PUT,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Disassociates a device from your AWS account using its device ID.
-    ///
-    /// Parameters:
-    ///   - deviceId: The unique identifier of the device.
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func unclaimDevice(
-        deviceId: String,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws -> UnclaimDeviceResponse {
-        let input = UnclaimDeviceRequest(
-            deviceId: deviceId
-        )
-        return try await self.unclaimDevice(input, logger: logger)
-    }
-
-    /// Using tag keys, deletes the tags (key/value pairs) associated with the specified resource ARN.
-    @Sendable
-    @inlinable
-    public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
-        try await self.client.execute(
-            operation: "UntagResource",
-            path: "/tags/{ResourceArn}",
-            httpMethod: .DELETE,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Using tag keys, deletes the tags (key/value pairs) associated with the specified resource ARN.
-    ///
-    /// Parameters:
-    ///   - resourceArn: The ARN of the resource.
-    ///   - tagKeys: A collections of tag keys. For example, {"key1","key2"}
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func untagResource(
-        resourceArn: String,
-        tagKeys: [String]? = nil,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws {
-        let input = UntagResourceRequest(
-            resourceArn: resourceArn,
-            tagKeys: tagKeys
-        )
-        return try await self.untagResource(input, logger: logger)
-    }
-
-    /// Using a Boolean value (true or false), this operation enables or disables the device given a device ID.
-    @Sendable
-    @inlinable
-    public func updateDeviceState(_ input: UpdateDeviceStateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateDeviceStateResponse {
-        try await self.client.execute(
-            operation: "UpdateDeviceState",
-            path: "/devices/{DeviceId}/state",
-            httpMethod: .PUT,
-            serviceConfig: self.config,
-            input: input,
-            logger: logger
-        )
-    }
-    /// Using a Boolean value (true or false), this operation enables or disables the device given a device ID.
-    ///
-    /// Parameters:
-    ///   - deviceId: The unique identifier of the device.
-    ///   - enabled: If true, the device is enabled. If false, the device is disabled.
-    ///   - logger: Logger use during operation
-    @inlinable
-    public func updateDeviceState(
-        deviceId: String,
-        enabled: Bool? = nil,
-        logger: Logger = AWSClient.loggingDisabled
-    ) async throws -> UpdateDeviceStateResponse {
-        let input = UpdateDeviceStateRequest(
-            deviceId: deviceId,
-            enabled: enabled
-        )
-        return try await self.updateDeviceState(input, logger: logger)
-    }
-}
-
-extension IoT1ClickDevicesService {
-    /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are not public
-    /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead.
-    public init(from: IoT1ClickDevicesService, patch: AWSServiceConfig.Patch) {
-        self.client = from.client
-        self.config = from.config.with(patch: patch)
-    }
-}
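The deleted client above wrapped the claim flow its doc comments describe: initiate a claim, publish a device event (for a button, a physical press), then finalize. A minimal sketch of how a caller used this now-removed module, assuming a default-configured AWSClient; the claim code, device ID, and tag values are placeholders:

    import SotoIoT1ClickDevicesService

    let client = AWSClient()
    let devices = IoT1ClickDevicesService(client: client)

    // Bulk claim: devices that shipped with a manufacturer claim code ("C-" prefix).
    _ = try await devices.claimDevicesByClaimCode(claimCode: "C-1234567890")

    // Per-device claim: initiate, press the button to publish a device event,
    // then finalize (optionally tagging the device).
    _ = try await devices.initiateDeviceClaim(deviceId: "G030PM0123456789")
    _ = try await devices.finalizeDeviceClaim(deviceId: "G030PM0123456789", tags: ["team": "iot"])

    try await client.shutdown()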
diff --git a/Sources/Soto/Services/IoT1ClickDevicesService/IoT1ClickDevicesService_shapes.swift b/Sources/Soto/Services/IoT1ClickDevicesService/IoT1ClickDevicesService_shapes.swift
deleted file mode 100644
index fa13f70607..0000000000
--- a/Sources/Soto/Services/IoT1ClickDevicesService/IoT1ClickDevicesService_shapes.swift
+++ /dev/null
@@ -1,642 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// This source file is part of the Soto for AWS open source project
-//
-// Copyright (c) 2017-2024 the Soto project authors
-// Licensed under Apache License v2.0
-//
-// See LICENSE.txt for license information
-// See CONTRIBUTORS.txt for the list of Soto project authors
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-//===----------------------------------------------------------------------===//
-
-// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator.
-// DO NOT EDIT.
-
-#if os(Linux) && compiler(<5.10)
-// swift-corelibs-foundation hasn't been updated with Sendable conformances
-@preconcurrency import Foundation
-#else
-import Foundation
-#endif
-@_spi(SotoInternal) import SotoCore
-
-extension IoT1ClickDevicesService {
-    // MARK: Enums
-
-    // MARK: Shapes
-
-    public struct Attributes: AWSDecodableShape {
-        public init() {}
-    }
-
-    public struct ClaimDevicesByClaimCodeRequest: AWSEncodableShape {
-        /// The claim code, starting with "C-", as provided by the device manufacturer.
-        public let claimCode: String
-
-        @inlinable
-        public init(claimCode: String) {
-            self.claimCode = claimCode
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            _ = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.claimCode, key: "ClaimCode")
-        }
-
-        private enum CodingKeys: CodingKey {}
-    }
-
-    public struct ClaimDevicesByClaimCodeResponse: AWSDecodableShape {
-        /// The claim code provided by the device manufacturer.
-        public let claimCode: String?
-        /// The total number of devices associated with the claim code that has been processed in the claim request.
-        public let total: Int?
-
-        @inlinable
-        public init(claimCode: String? = nil, total: Int? = nil) {
-            self.claimCode = claimCode
-            self.total = total
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case claimCode = "claimCode"
-            case total = "total"
-        }
-    }
-
-    public struct DescribeDeviceRequest: AWSEncodableShape {
-        /// The unique identifier of the device.
-        public let deviceId: String
-
-        @inlinable
-        public init(deviceId: String) {
-            self.deviceId = deviceId
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            _ = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.deviceId, key: "DeviceId")
-        }
-
-        private enum CodingKeys: CodingKey {}
-    }
-
-    public struct DescribeDeviceResponse: AWSDecodableShape {
-        /// Device details.
-        public let deviceDescription: DeviceDescription?
-
-        @inlinable
-        public init(deviceDescription: DeviceDescription? = nil) {
-            self.deviceDescription = deviceDescription
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case deviceDescription = "deviceDescription"
-        }
-    }
-
-    public struct Device: AWSDecodableShape {
-        /// The user specified attributes associated with the device for an event.
-        public let attributes: Attributes?
-        /// The unique identifier of the device.
-        public let deviceId: String?
-        /// The device type, such as "button".
-        public let type: String?
-
-        @inlinable
-        public init(attributes: Attributes? = nil, deviceId: String? = nil, type: String? = nil) {
-            self.attributes = attributes
-            self.deviceId = deviceId
-            self.type = type
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case attributes = "attributes"
-            case deviceId = "deviceId"
-            case type = "type"
-        }
-    }
-
-    public struct DeviceDescription: AWSDecodableShape {
-        /// The ARN of the device.
-        public let arn: String?
-        /// An array of zero or more elements of DeviceAttribute objects providing user specified device attributes.
-        public let attributes: [String: String]?
-        /// The unique identifier of the device.
-        public let deviceId: String?
-        /// A Boolean value indicating whether or not the device is enabled.
-        public let enabled: Bool?
-        /// A value between 0 and 1 inclusive, representing the fraction of life remaining for the device.
-        public let remainingLife: Double?
-        /// The tags currently associated with the AWS IoT 1-Click device.
-        public let tags: [String: String]?
-        /// The type of the device, such as "button".
-        public let type: String?
-
-        @inlinable
-        public init(arn: String? = nil, attributes: [String: String]? = nil, deviceId: String? = nil, enabled: Bool? = nil, remainingLife: Double? = nil, tags: [String: String]? = nil, type: String? = nil) {
-            self.arn = arn
-            self.attributes = attributes
-            self.deviceId = deviceId
-            self.enabled = enabled
-            self.remainingLife = remainingLife
-            self.tags = tags
-            self.type = type
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case arn = "arn"
-            case attributes = "attributes"
-            case deviceId = "deviceId"
-            case enabled = "enabled"
-            case remainingLife = "remainingLife"
-            case tags = "tags"
-            case type = "type"
-        }
-    }
-
-    public struct DeviceEvent: AWSDecodableShape {
-        /// An object representing the device associated with the event.
-        public let device: Device?
-        /// A serialized JSON object representing the device-type specific event.
-        public let stdEvent: String?
-
-        @inlinable
-        public init(device: Device? = nil, stdEvent: String? = nil) {
-            self.device = device
-            self.stdEvent = stdEvent
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case device = "device"
-            case stdEvent = "stdEvent"
-        }
-    }
-
-    public struct DeviceMethod: AWSEncodableShape & AWSDecodableShape {
-        /// The type of the device, such as "button".
-        public let deviceType: String?
-        /// The name of the method applicable to the deviceType.
-        public let methodName: String?
-
-        @inlinable
-        public init(deviceType: String? = nil, methodName: String? = nil) {
-            self.deviceType = deviceType
-            self.methodName = methodName
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case deviceType = "deviceType"
-            case methodName = "methodName"
-        }
-    }
-
-    public struct FinalizeDeviceClaimRequest: AWSEncodableShape {
-        /// The unique identifier of the device.
-        public let deviceId: String
-        /// A collection of key/value pairs defining the resource tags. For example, { "tags": {"key1": "value1", "key2": "value2"} }. For more information, see AWS Tagging Strategies.
-        public let tags: [String: String]?
-
-        @inlinable
-        public init(deviceId: String, tags: [String: String]? = nil) {
-            self.deviceId = deviceId
-            self.tags = tags
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            var container = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.deviceId, key: "DeviceId")
-            try container.encodeIfPresent(self.tags, forKey: .tags)
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case tags = "tags"
-        }
-    }
-
-    public struct FinalizeDeviceClaimResponse: AWSDecodableShape {
-        /// The device's final claim state.
-        public let state: String?
-
-        @inlinable
-        public init(state: String? = nil) {
-            self.state = state
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case state = "state"
-        }
-    }
-
-    public struct GetDeviceMethodsRequest: AWSEncodableShape {
-        /// The unique identifier of the device.
-        public let deviceId: String
-
-        @inlinable
-        public init(deviceId: String) {
-            self.deviceId = deviceId
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            _ = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.deviceId, key: "DeviceId")
-        }
-
-        private enum CodingKeys: CodingKey {}
-    }
-
-    public struct GetDeviceMethodsResponse: AWSDecodableShape {
-        /// List of available device APIs.
-        public let deviceMethods: [DeviceMethod]?
-
-        @inlinable
-        public init(deviceMethods: [DeviceMethod]? = nil) {
-            self.deviceMethods = deviceMethods
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case deviceMethods = "deviceMethods"
-        }
-    }
-
-    public struct InitiateDeviceClaimRequest: AWSEncodableShape {
-        /// The unique identifier of the device.
-        public let deviceId: String
-
-        @inlinable
-        public init(deviceId: String) {
-            self.deviceId = deviceId
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            _ = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.deviceId, key: "DeviceId")
-        }
-
-        private enum CodingKeys: CodingKey {}
-    }
-
-    public struct InitiateDeviceClaimResponse: AWSDecodableShape {
-        /// The device's final claim state.
-        public let state: String?
-
-        @inlinable
-        public init(state: String? = nil) {
-            self.state = state
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case state = "state"
-        }
-    }
-
-    public struct InvokeDeviceMethodRequest: AWSEncodableShape {
-        /// The unique identifier of the device.
-        public let deviceId: String
-        /// The device method to invoke.
-        public let deviceMethod: DeviceMethod?
-        /// A JSON encoded string containing the device method request parameters.
-        public let deviceMethodParameters: String?
-
-        @inlinable
-        public init(deviceId: String, deviceMethod: DeviceMethod? = nil, deviceMethodParameters: String? = nil) {
-            self.deviceId = deviceId
-            self.deviceMethod = deviceMethod
-            self.deviceMethodParameters = deviceMethodParameters
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            var container = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.deviceId, key: "DeviceId")
-            try container.encodeIfPresent(self.deviceMethod, forKey: .deviceMethod)
-            try container.encodeIfPresent(self.deviceMethodParameters, forKey: .deviceMethodParameters)
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case deviceMethod = "deviceMethod"
-            case deviceMethodParameters = "deviceMethodParameters"
-        }
-    }
-
-    public struct InvokeDeviceMethodResponse: AWSDecodableShape {
-        /// A JSON encoded string containing the device method response.
-        public let deviceMethodResponse: String?
-
-        @inlinable
-        public init(deviceMethodResponse: String? = nil) {
-            self.deviceMethodResponse = deviceMethodResponse
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case deviceMethodResponse = "deviceMethodResponse"
-        }
-    }
-
-    public struct ListDeviceEventsRequest: AWSEncodableShape {
-        /// The unique identifier of the device.
-        public let deviceId: String
-        /// The start date for the device event query, in ISO8061 format. For example, 2018-03-28T15:45:12.880Z
-        @OptionalCustomCoding<ISO8601DateCoder>
-        public var fromTimeStamp: Date?
-        /// The maximum number of results to return per request. If not set, a default value of 100 is used.
-        public let maxResults: Int?
-        /// The token to retrieve the next set of results.
-        public let nextToken: String?
-        /// The end date for the device event query, in ISO8061 format. For example, 2018-03-28T15:45:12.880Z
-        @OptionalCustomCoding<ISO8601DateCoder>
-        public var toTimeStamp: Date?
-
-        @inlinable
-        public init(deviceId: String, fromTimeStamp: Date? = nil, maxResults: Int? = nil, nextToken: String? = nil, toTimeStamp: Date? = nil) {
-            self.deviceId = deviceId
-            self.fromTimeStamp = fromTimeStamp
-            self.maxResults = maxResults
-            self.nextToken = nextToken
-            self.toTimeStamp = toTimeStamp
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            _ = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.deviceId, key: "DeviceId")
-            request.encodeQuery(self._fromTimeStamp, key: "fromTimeStamp")
-            request.encodeQuery(self.maxResults, key: "maxResults")
-            request.encodeQuery(self.nextToken, key: "nextToken")
-            request.encodeQuery(self._toTimeStamp, key: "toTimeStamp")
-        }
-
-        public func validate(name: String) throws {
-            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 250)
-            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
-        }
-
-        private enum CodingKeys: CodingKey {}
-    }
-
-    public struct ListDeviceEventsResponse: AWSDecodableShape {
-        /// An array of zero or more elements describing the event(s) associated with the device.
-        public let events: [DeviceEvent]?
-        /// The token to retrieve the next set of results.
-        public let nextToken: String?
-
-        @inlinable
-        public init(events: [DeviceEvent]? = nil, nextToken: String? = nil) {
-            self.events = events
-            self.nextToken = nextToken
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case events = "events"
-            case nextToken = "nextToken"
-        }
-    }
-
-    public struct ListDevicesRequest: AWSEncodableShape {
-        /// The type of the device, such as "button".
-        public let deviceType: String?
-        /// The maximum number of results to return per request. If not set, a default value of 100 is used.
-        public let maxResults: Int?
-        /// The token to retrieve the next set of results.
-        public let nextToken: String?
-
-        @inlinable
-        public init(deviceType: String? = nil, maxResults: Int? = nil, nextToken: String? = nil) {
-            self.deviceType = deviceType
-            self.maxResults = maxResults
-            self.nextToken = nextToken
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            _ = encoder.container(keyedBy: CodingKeys.self)
-            request.encodeQuery(self.deviceType, key: "deviceType")
-            request.encodeQuery(self.maxResults, key: "maxResults")
-            request.encodeQuery(self.nextToken, key: "nextToken")
-        }
-
-        public func validate(name: String) throws {
-            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 250)
-            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
-        }
-
-        private enum CodingKeys: CodingKey {}
-    }
-
-    public struct ListDevicesResponse: AWSDecodableShape {
-        /// A list of devices.
-        public let devices: [DeviceDescription]?
-        /// The token to retrieve the next set of results.
-        public let nextToken: String?
-
-        @inlinable
-        public init(devices: [DeviceDescription]? = nil, nextToken: String? = nil) {
-            self.devices = devices
-            self.nextToken = nextToken
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case devices = "devices"
-            case nextToken = "nextToken"
-        }
-    }
-
-    public struct ListTagsForResourceRequest: AWSEncodableShape {
-        /// The ARN of the resource.
-        public let resourceArn: String
-
-        @inlinable
-        public init(resourceArn: String) {
-            self.resourceArn = resourceArn
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            _ = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.resourceArn, key: "ResourceArn")
-        }
-
-        private enum CodingKeys: CodingKey {}
-    }
-
-    public struct ListTagsForResourceResponse: AWSDecodableShape {
-        /// A collection of key/value pairs defining the resource tags. For example, { "tags": {"key1": "value1", "key2": "value2"} }. For more information, see AWS Tagging Strategies.
-        public let tags: [String: String]?
-
-        @inlinable
-        public init(tags: [String: String]? = nil) {
-            self.tags = tags
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case tags = "tags"
-        }
-    }
-
-    public struct TagResourceRequest: AWSEncodableShape {
-        /// The ARN of the resource.
-        public let resourceArn: String
-        /// A collection of key/value pairs defining the resource tags. For example, { "tags": {"key1": "value1", "key2": "value2"} }. For more information, see AWS Tagging Strategies.
-        public let tags: [String: String]?
-
-        @inlinable
-        public init(resourceArn: String, tags: [String: String]? = nil) {
-            self.resourceArn = resourceArn
-            self.tags = tags
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            var container = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.resourceArn, key: "ResourceArn")
-            try container.encodeIfPresent(self.tags, forKey: .tags)
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case tags = "tags"
-        }
-    }
-
-    public struct UnclaimDeviceRequest: AWSEncodableShape {
-        /// The unique identifier of the device.
-        public let deviceId: String
-
-        @inlinable
-        public init(deviceId: String) {
-            self.deviceId = deviceId
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            _ = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.deviceId, key: "DeviceId")
-        }
-
-        private enum CodingKeys: CodingKey {}
-    }
-
-    public struct UnclaimDeviceResponse: AWSDecodableShape {
-        /// The device's final claim state.
-        public let state: String?
-
-        @inlinable
-        public init(state: String? = nil) {
-            self.state = state
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case state = "state"
-        }
-    }
-
-    public struct UntagResourceRequest: AWSEncodableShape {
-        /// The ARN of the resource.
-        public let resourceArn: String
-        /// A collections of tag keys. For example, {"key1","key2"}
-        public let tagKeys: [String]?
-
-        @inlinable
-        public init(resourceArn: String, tagKeys: [String]? = nil) {
-            self.resourceArn = resourceArn
-            self.tagKeys = tagKeys
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            _ = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.resourceArn, key: "ResourceArn")
-            request.encodeQuery(self.tagKeys, key: "tagKeys")
-        }
-
-        private enum CodingKeys: CodingKey {}
-    }
-
-    public struct UpdateDeviceStateRequest: AWSEncodableShape {
-        /// The unique identifier of the device.
-        public let deviceId: String
-        /// If true, the device is enabled. If false, the device is disabled.
-        public let enabled: Bool?
-
-        @inlinable
-        public init(deviceId: String, enabled: Bool? = nil) {
-            self.deviceId = deviceId
-            self.enabled = enabled
-        }
-
-        public func encode(to encoder: Encoder) throws {
-            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
-            var container = encoder.container(keyedBy: CodingKeys.self)
-            request.encodePath(self.deviceId, key: "DeviceId")
-            try container.encodeIfPresent(self.enabled, forKey: .enabled)
-        }
-
-        private enum CodingKeys: String, CodingKey {
-            case enabled = "enabled"
-        }
-    }
-
-    public struct UpdateDeviceStateResponse: AWSDecodableShape {
-        public init() {}
-    }
-}
-
-// MARK: - Errors
-
-/// Error enum for IoT1ClickDevicesService
-public struct IoT1ClickDevicesServiceErrorType: AWSErrorType {
-    enum Code: String {
-        case forbiddenException = "ForbiddenException"
-        case internalFailureException = "InternalFailureException"
-        case invalidRequestException = "InvalidRequestException"
-        case preconditionFailedException = "PreconditionFailedException"
-        case rangeNotSatisfiableException = "RangeNotSatisfiableException"
-        case resourceConflictException = "ResourceConflictException"
-        case resourceNotFoundException = "ResourceNotFoundException"
-    }
-
-    private let error: Code
-    public let context: AWSErrorContext?
-
-    /// initialize IoT1ClickDevicesService
-    public init?(errorCode: String, context: AWSErrorContext) {
-        guard let error = Code(rawValue: errorCode) else { return nil }
-        self.error = error
-        self.context = context
-    }
-
-    internal init(_ error: Code) {
-        self.error = error
-        self.context = nil
-    }
-
-    /// return error code string
-    public var errorCode: String { self.error.rawValue }
-
-    public static var forbiddenException: Self { .init(.forbiddenException) }
-    public static var internalFailureException: Self { .init(.internalFailureException) }
-    public static var invalidRequestException: Self { .init(.invalidRequestException) }
-    public static var preconditionFailedException: Self { .init(.preconditionFailedException) }
-    public static var rangeNotSatisfiableException: Self { .init(.rangeNotSatisfiableException) }
-    public static var resourceConflictException: Self { .init(.resourceConflictException) }
-    public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) }
-}
-
-extension IoT1ClickDevicesServiceErrorType: Equatable {
-    public static func == (lhs: IoT1ClickDevicesServiceErrorType, rhs: IoT1ClickDevicesServiceErrorType) -> Bool {
-        lhs.error == rhs.error
-    }
-}
-
-extension IoT1ClickDevicesServiceErrorType: CustomStringConvertible {
-    public var description: String {
-        return "\(self.error.rawValue): \(self.message ?? "")"
-    }
-}
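The shapes above carry device-method payloads as JSON-encoded strings rather than typed values, and failures surface through the IoT1ClickDevicesServiceErrorType struct deleted at the end of the file. A hedged sketch of both, reusing the `devices` client from the earlier sketch; the method name and payload are hypothetical, not taken from the service model:

    let method = IoT1ClickDevicesService.DeviceMethod(
        deviceType: "button",
        methodName: "getDeviceHealthParameters"  // hypothetical method name
    )
    do {
        let response = try await devices.invokeDeviceMethod(
            deviceId: "G030PM0123456789",
            deviceMethod: method,
            deviceMethodParameters: "{}"  // JSON-encoded request parameters
        )
        print(response.deviceMethodResponse ?? "no payload")
    } catch let error as IoT1ClickDevicesServiceErrorType where error == .resourceNotFoundException {
        print("device not found")
    }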
- /// - endpoint: Custom endpoint URL to use instead of standard AWS servers - /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded - /// - timeout: Timeout value for HTTP requests - /// - byteBufferAllocator: Allocator for ByteBuffers - /// - options: Service options - public init( - client: AWSClient, - region: SotoCore.Region? = nil, - partition: AWSPartition = .aws, - endpoint: String? = nil, - middleware: AWSMiddlewareProtocol? = nil, - timeout: TimeAmount? = nil, - byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), - options: AWSServiceConfig.Options = [] - ) { - self.client = client - self.config = AWSServiceConfig( - region: region, - partition: region?.partition ?? partition, - serviceName: "IoT1ClickProjects", - serviceIdentifier: "projects.iot1click", - signingName: "iot1click", - serviceProtocol: .restjson, - apiVersion: "2018-05-14", - endpoint: endpoint, - errorType: IoT1ClickProjectsErrorType.self, - middleware: middleware, - timeout: timeout, - byteBufferAllocator: byteBufferAllocator, - options: options - ) - } - - - - - - // MARK: API Calls - - /// Associates a physical device with a placement. - @Sendable - @inlinable - public func associateDeviceWithPlacement(_ input: AssociateDeviceWithPlacementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateDeviceWithPlacementResponse { - try await self.client.execute( - operation: "AssociateDeviceWithPlacement", - path: "/projects/{projectName}/placements/{placementName}/devices/{deviceTemplateName}", - httpMethod: .PUT, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Associates a physical device with a placement. - /// - /// Parameters: - /// - deviceId: The ID of the physical device to be associated with the given placement in the project. Note that a mandatory 4 character prefix is required for all deviceId values. - /// - deviceTemplateName: The device template name to associate with the device ID. - /// - placementName: The name of the placement in which to associate the device. - /// - projectName: The name of the project containing the placement in which to associate the device. - /// - logger: Logger use during operation - @inlinable - public func associateDeviceWithPlacement( - deviceId: String, - deviceTemplateName: String, - placementName: String, - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> AssociateDeviceWithPlacementResponse { - let input = AssociateDeviceWithPlacementRequest( - deviceId: deviceId, - deviceTemplateName: deviceTemplateName, - placementName: placementName, - projectName: projectName - ) - return try await self.associateDeviceWithPlacement(input, logger: logger) - } - - /// Creates an empty placement. - @Sendable - @inlinable - public func createPlacement(_ input: CreatePlacementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePlacementResponse { - try await self.client.execute( - operation: "CreatePlacement", - path: "/projects/{projectName}/placements", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Creates an empty placement. - /// - /// Parameters: - /// - attributes: Optional user-defined key/value pairs providing contextual data (such as location or function) for the placement. - /// - placementName: The name of the placement to be created. - /// - projectName: The name of the project in which to create the placement. 
- /// - logger: Logger use during operation - @inlinable - public func createPlacement( - attributes: [String: String]? = nil, - placementName: String, - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> CreatePlacementResponse { - let input = CreatePlacementRequest( - attributes: attributes, - placementName: placementName, - projectName: projectName - ) - return try await self.createPlacement(input, logger: logger) - } - - /// Creates an empty project with a placement template. A project contains zero or more placements that adhere to the placement template defined in the project. - @Sendable - @inlinable - public func createProject(_ input: CreateProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateProjectResponse { - try await self.client.execute( - operation: "CreateProject", - path: "/projects", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Creates an empty project with a placement template. A project contains zero or more placements that adhere to the placement template defined in the project. - /// - /// Parameters: - /// - description: An optional description for the project. - /// - placementTemplate: The schema defining the placement to be created. A placement template defines placement default attributes and device templates. You cannot add or remove device templates after the project has been created. However, you can update callbackOverrides for the device templates using the UpdateProject API. - /// - projectName: The name of the project to create. - /// - tags: Optional tags (metadata key/value pairs) to be associated with the project. For example, { {"key1": "value1", "key2": "value2"} }. For more information, see AWS Tagging Strategies. - /// - logger: Logger use during operation - @inlinable - public func createProject( - description: String? = nil, - placementTemplate: PlacementTemplate? = nil, - projectName: String, - tags: [String: String]? = nil, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> CreateProjectResponse { - let input = CreateProjectRequest( - description: description, - placementTemplate: placementTemplate, - projectName: projectName, - tags: tags - ) - return try await self.createProject(input, logger: logger) - } - - /// Deletes a placement. To delete a placement, it must not have any devices associated with it. When you delete a placement, all associated data becomes irretrievable. - @Sendable - @inlinable - public func deletePlacement(_ input: DeletePlacementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeletePlacementResponse { - try await self.client.execute( - operation: "DeletePlacement", - path: "/projects/{projectName}/placements/{placementName}", - httpMethod: .DELETE, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Deletes a placement. To delete a placement, it must not have any devices associated with it. When you delete a placement, all associated data becomes irretrievable. - /// - /// Parameters: - /// - placementName: The name of the empty placement to delete. - /// - projectName: The project containing the empty placement to delete. 
- /// - logger: Logger use during operation - @inlinable - public func deletePlacement( - placementName: String, - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DeletePlacementResponse { - let input = DeletePlacementRequest( - placementName: placementName, - projectName: projectName - ) - return try await self.deletePlacement(input, logger: logger) - } - - /// Deletes a project. To delete a project, it must not have any placements associated with it. When you delete a project, all associated data becomes irretrievable. - @Sendable - @inlinable - public func deleteProject(_ input: DeleteProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteProjectResponse { - try await self.client.execute( - operation: "DeleteProject", - path: "/projects/{projectName}", - httpMethod: .DELETE, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Deletes a project. To delete a project, it must not have any placements associated with it. When you delete a project, all associated data becomes irretrievable. - /// - /// Parameters: - /// - projectName: The name of the empty project to delete. - /// - logger: Logger use during operation - @inlinable - public func deleteProject( - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DeleteProjectResponse { - let input = DeleteProjectRequest( - projectName: projectName - ) - return try await self.deleteProject(input, logger: logger) - } - - /// Describes a placement in a project. - @Sendable - @inlinable - public func describePlacement(_ input: DescribePlacementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribePlacementResponse { - try await self.client.execute( - operation: "DescribePlacement", - path: "/projects/{projectName}/placements/{placementName}", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Describes a placement in a project. - /// - /// Parameters: - /// - placementName: The name of the placement within a project. - /// - projectName: The project containing the placement to be described. - /// - logger: Logger use during operation - @inlinable - public func describePlacement( - placementName: String, - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DescribePlacementResponse { - let input = DescribePlacementRequest( - placementName: placementName, - projectName: projectName - ) - return try await self.describePlacement(input, logger: logger) - } - - /// Returns an object describing a project. - @Sendable - @inlinable - public func describeProject(_ input: DescribeProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeProjectResponse { - try await self.client.execute( - operation: "DescribeProject", - path: "/projects/{projectName}", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Returns an object describing a project. - /// - /// Parameters: - /// - projectName: The name of the project to be described. - /// - logger: Logger use during operation - @inlinable - public func describeProject( - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DescribeProjectResponse { - let input = DescribeProjectRequest( - projectName: projectName - ) - return try await self.describeProject(input, logger: logger) - } - - /// Removes a physical device from a placement. 
- @Sendable - @inlinable - public func disassociateDeviceFromPlacement(_ input: DisassociateDeviceFromPlacementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateDeviceFromPlacementResponse { - try await self.client.execute( - operation: "DisassociateDeviceFromPlacement", - path: "/projects/{projectName}/placements/{placementName}/devices/{deviceTemplateName}", - httpMethod: .DELETE, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Removes a physical device from a placement. - /// - /// Parameters: - /// - deviceTemplateName: The device ID that should be removed from the placement. - /// - placementName: The name of the placement that the device should be removed from. - /// - projectName: The name of the project that contains the placement. - /// - logger: Logger use during operation - @inlinable - public func disassociateDeviceFromPlacement( - deviceTemplateName: String, - placementName: String, - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DisassociateDeviceFromPlacementResponse { - let input = DisassociateDeviceFromPlacementRequest( - deviceTemplateName: deviceTemplateName, - placementName: placementName, - projectName: projectName - ) - return try await self.disassociateDeviceFromPlacement(input, logger: logger) - } - - /// Returns an object enumerating the devices in a placement. - @Sendable - @inlinable - public func getDevicesInPlacement(_ input: GetDevicesInPlacementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDevicesInPlacementResponse { - try await self.client.execute( - operation: "GetDevicesInPlacement", - path: "/projects/{projectName}/placements/{placementName}/devices", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Returns an object enumerating the devices in a placement. - /// - /// Parameters: - /// - placementName: The name of the placement to get the devices from. - /// - projectName: The name of the project containing the placement. - /// - logger: Logger use during operation - @inlinable - public func getDevicesInPlacement( - placementName: String, - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> GetDevicesInPlacementResponse { - let input = GetDevicesInPlacementRequest( - placementName: placementName, - projectName: projectName - ) - return try await self.getDevicesInPlacement(input, logger: logger) - } - - /// Lists the placement(s) of a project. - @Sendable - @inlinable - public func listPlacements(_ input: ListPlacementsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListPlacementsResponse { - try await self.client.execute( - operation: "ListPlacements", - path: "/projects/{projectName}/placements", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Lists the placement(s) of a project. - /// - /// Parameters: - /// - maxResults: The maximum number of results to return per request. If not set, a default value of 100 is used. - /// - nextToken: The token to retrieve the next set of results. - /// - projectName: The project containing the placements to be listed. - /// - logger: Logger use during operation - @inlinable - public func listPlacements( - maxResults: Int? = nil, - nextToken: String? 
= nil, - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> ListPlacementsResponse { - let input = ListPlacementsRequest( - maxResults: maxResults, - nextToken: nextToken, - projectName: projectName - ) - return try await self.listPlacements(input, logger: logger) - } - - /// Lists the AWS IoT 1-Click project(s) associated with your AWS account and region. - @Sendable - @inlinable - public func listProjects(_ input: ListProjectsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListProjectsResponse { - try await self.client.execute( - operation: "ListProjects", - path: "/projects", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Lists the AWS IoT 1-Click project(s) associated with your AWS account and region. - /// - /// Parameters: - /// - maxResults: The maximum number of results to return per request. If not set, a default value of 100 is used. - /// - nextToken: The token to retrieve the next set of results. - /// - logger: Logger use during operation - @inlinable - public func listProjects( - maxResults: Int? = nil, - nextToken: String? = nil, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> ListProjectsResponse { - let input = ListProjectsRequest( - maxResults: maxResults, - nextToken: nextToken - ) - return try await self.listProjects(input, logger: logger) - } - - /// Lists the tags (metadata key/value pairs) which you have assigned to the resource. - @Sendable - @inlinable - public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { - try await self.client.execute( - operation: "ListTagsForResource", - path: "/tags/{resourceArn}", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Lists the tags (metadata key/value pairs) which you have assigned to the resource. - /// - /// Parameters: - /// - resourceArn: The ARN of the resource whose tags you want to list. - /// - logger: Logger use during operation - @inlinable - public func listTagsForResource( - resourceArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> ListTagsForResourceResponse { - let input = ListTagsForResourceRequest( - resourceArn: resourceArn - ) - return try await self.listTagsForResource(input, logger: logger) - } - - /// Creates or modifies tags for a resource. Tags are key/value pairs (metadata) that can be used to manage a resource. For more information, see AWS Tagging Strategies. - @Sendable - @inlinable - public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { - try await self.client.execute( - operation: "TagResource", - path: "/tags/{resourceArn}", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Creates or modifies tags for a resource. Tags are key/value pairs (metadata) that can be used to manage a resource. For more information, see AWS Tagging Strategies. - /// - /// Parameters: - /// - resourceArn: The ARN of the resouce for which tag(s) should be added or modified. - /// - tags: The new or modifying tag(s) for the resource. See AWS IoT 1-Click Service Limits for the maximum number of tags allowed per resource. 
- /// - logger: Logger use during operation - @inlinable - public func tagResource( - resourceArn: String, - tags: [String: String], - logger: Logger = AWSClient.loggingDisabled - ) async throws -> TagResourceResponse { - let input = TagResourceRequest( - resourceArn: resourceArn, - tags: tags - ) - return try await self.tagResource(input, logger: logger) - } - - /// Removes one or more tags (metadata key/value pairs) from a resource. - @Sendable - @inlinable - public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { - try await self.client.execute( - operation: "UntagResource", - path: "/tags/{resourceArn}", - httpMethod: .DELETE, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Removes one or more tags (metadata key/value pairs) from a resource. - /// - /// Parameters: - /// - resourceArn: The ARN of the resource whose tag you want to remove. - /// - tagKeys: The keys of those tags which you want to remove. - /// - logger: Logger use during operation - @inlinable - public func untagResource( - resourceArn: String, - tagKeys: [String], - logger: Logger = AWSClient.loggingDisabled - ) async throws -> UntagResourceResponse { - let input = UntagResourceRequest( - resourceArn: resourceArn, - tagKeys: tagKeys - ) - return try await self.untagResource(input, logger: logger) - } - - /// Updates a placement with the given attributes. To clear an attribute, pass an empty value (i.e., ""). - @Sendable - @inlinable - public func updatePlacement(_ input: UpdatePlacementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdatePlacementResponse { - try await self.client.execute( - operation: "UpdatePlacement", - path: "/projects/{projectName}/placements/{placementName}", - httpMethod: .PUT, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Updates a placement with the given attributes. To clear an attribute, pass an empty value (i.e., ""). - /// - /// Parameters: - /// - attributes: The user-defined object of attributes used to update the placement. The maximum number of key/value pairs is 50. - /// - placementName: The name of the placement to update. - /// - projectName: The name of the project containing the placement to be updated. - /// - logger: Logger use during operation - @inlinable - public func updatePlacement( - attributes: [String: String]? = nil, - placementName: String, - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> UpdatePlacementResponse { - let input = UpdatePlacementRequest( - attributes: attributes, - placementName: placementName, - projectName: projectName - ) - return try await self.updatePlacement(input, logger: logger) - } - - /// Updates a project associated with your AWS account and region. With the exception of device template names, you can pass just the values that need to be updated because the update request will change only the values that are provided. To clear a value, pass the empty string (i.e., ""). - @Sendable - @inlinable - public func updateProject(_ input: UpdateProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateProjectResponse { - try await self.client.execute( - operation: "UpdateProject", - path: "/projects/{projectName}", - httpMethod: .PUT, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Updates a project associated with your AWS account and region. 
With the exception of device template names, you can pass just the values that need to be updated because the update request will change only the values that are provided. To clear a value, pass the empty string (i.e., ""). - /// - /// Parameters: - /// - description: An optional user-defined description for the project. - /// - placementTemplate: An object defining the project update. Once a project has been created, you cannot add device template names to the project. However, for a given placementTemplate, you can update the associated callbackOverrides for the device definition using this API. - /// - projectName: The name of the project to be updated. - /// - logger: Logger use during operation - @inlinable - public func updateProject( - description: String? = nil, - placementTemplate: PlacementTemplate? = nil, - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> UpdateProjectResponse { - let input = UpdateProjectRequest( - description: description, - placementTemplate: placementTemplate, - projectName: projectName - ) - return try await self.updateProject(input, logger: logger) - } -} - -extension IoT1ClickProjects { - /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are not public - /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. - public init(from: IoT1ClickProjects, patch: AWSServiceConfig.Patch) { - self.client = from.client - self.config = from.config.with(patch: patch) - } -} - -// MARK: Paginators - -@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) -extension IoT1ClickProjects { - /// Return PaginatorSequence for operation ``listPlacements(_:logger:)``. - /// - /// - Parameters: - /// - input: Input for operation - /// - logger: Logger used for logging - @inlinable - public func listPlacementsPaginator( - _ input: ListPlacementsRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listPlacements, - inputKey: \ListPlacementsRequest.nextToken, - outputKey: \ListPlacementsResponse.nextToken, - logger: logger - ) - } - /// Return PaginatorSequence for operation ``listPlacements(_:logger:)``. - /// - /// - Parameters: - /// - maxResults: The maximum number of results to return per request. If not set, a default value of 100 is used. - /// - projectName: The project containing the placements to be listed. - /// - logger: Logger used for logging - @inlinable - public func listPlacementsPaginator( - maxResults: Int? = nil, - projectName: String, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - let input = ListPlacementsRequest( - maxResults: maxResults, - projectName: projectName - ) - return self.listPlacementsPaginator(input, logger: logger) - } - - /// Return PaginatorSequence for operation ``listProjects(_:logger:)``. - /// - /// - Parameters: - /// - input: Input for operation - /// - logger: Logger used for logging - @inlinable - public func listProjectsPaginator( - _ input: ListProjectsRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listProjects, - inputKey: \ListProjectsRequest.nextToken, - outputKey: \ListProjectsResponse.nextToken, - logger: logger - ) - } - /// Return PaginatorSequence for operation ``listProjects(_:logger:)``. 
- /// - /// - Parameters: - /// - maxResults: The maximum number of results to return per request. If not set, a default value of 100 is used. - /// - logger: Logger used for logging - @inlinable - public func listProjectsPaginator( - maxResults: Int? = nil, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence<ListProjectsRequest, ListProjectsResponse> { - let input = ListProjectsRequest( - maxResults: maxResults - ) - return self.listProjectsPaginator(input, logger: logger) - } -} - -extension IoT1ClickProjects.ListPlacementsRequest: AWSPaginateToken { - @inlinable - public func usingPaginationToken(_ token: String) -> IoT1ClickProjects.ListPlacementsRequest { - return .init( - maxResults: self.maxResults, - nextToken: token, - projectName: self.projectName - ) - } -} - -extension IoT1ClickProjects.ListProjectsRequest: AWSPaginateToken { - @inlinable - public func usingPaginationToken(_ token: String) -> IoT1ClickProjects.ListProjectsRequest { - return .init( - maxResults: self.maxResults, - nextToken: token - ) - } -} diff --git a/Sources/Soto/Services/IoT1ClickProjects/IoT1ClickProjects_shapes.swift b/Sources/Soto/Services/IoT1ClickProjects/IoT1ClickProjects_shapes.swift deleted file mode 100644 index 094b0df02c..0000000000 --- a/Sources/Soto/Services/IoT1ClickProjects/IoT1ClickProjects_shapes.swift +++ /dev/null @@ -1,939 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// This source file is part of the Soto for AWS open source project -// -// Copyright (c) 2017-2024 the Soto project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of Soto project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. -// DO NOT EDIT. - -#if os(Linux) && compiler(<5.10) -// swift-corelibs-foundation hasn't been updated with Sendable conformances -@preconcurrency import Foundation -#else -import Foundation -#endif -@_spi(SotoInternal) import SotoCore - -extension IoT1ClickProjects { - // MARK: Enums - - // MARK: Shapes - - public struct AssociateDeviceWithPlacementRequest: AWSEncodableShape { - /// The ID of the physical device to be associated with the given placement in the project. Note that a mandatory 4 character prefix is required for all deviceId values. - public let deviceId: String - /// The device template name to associate with the device ID. - public let deviceTemplateName: String - /// The name of the placement in which to associate the device. - public let placementName: String - /// The name of the project containing the placement in which to associate the device. - public let projectName: String - - @inlinable - public init(deviceId: String, deviceTemplateName: String, placementName: String, projectName: String) { - self.deviceId = deviceId - self.deviceTemplateName = deviceTemplateName - self.placementName = placementName - self.projectName = projectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encode(self.deviceId, forKey: .deviceId) - request.encodePath(self.deviceTemplateName, key: "deviceTemplateName") - request.encodePath(self.placementName, key: "placementName") - request.encodePath(self.projectName, key: "projectName") - } - - public func validate(name: String) throws { - try self.validate(self.deviceId, name: "deviceId", parent: name, max: 32) - try self.validate(self.deviceId, name: "deviceId", parent: name, min: 1) - try self.validate(self.deviceTemplateName, name: "deviceTemplateName", parent: name, max: 128) - try self.validate(self.deviceTemplateName, name: "deviceTemplateName", parent: name, min: 1) - try self.validate(self.deviceTemplateName, name: "deviceTemplateName", parent: name, pattern: "^[a-zA-Z0-9_-]+$") - try self.validate(self.placementName, name: "placementName", parent: name, max: 128) - try self.validate(self.placementName, name: "placementName", parent: name, min: 1) - try self.validate(self.placementName, name: "placementName", parent: name, pattern: "^[a-zA-Z0-9_-]+$") - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - } - - private enum CodingKeys: String, CodingKey { - case deviceId = "deviceId" - } - } - - public struct AssociateDeviceWithPlacementResponse: AWSDecodableShape { - public init() {} - } - - public struct CreatePlacementRequest: AWSEncodableShape { - /// Optional user-defined key/value pairs providing contextual data (such as location or function) for the placement. - public let attributes: [String: String]? - /// The name of the placement to be created. - public let placementName: String - /// The name of the project in which to create the placement. - public let projectName: String - - @inlinable - public init(attributes: [String: String]? = nil, placementName: String, projectName: String) { - self.attributes = attributes - self.placementName = placementName - self.projectName = projectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encodeIfPresent(self.attributes, forKey: .attributes) - try container.encode(self.placementName, forKey: .placementName) - request.encodePath(self.projectName, key: "projectName") - } - - public func validate(name: String) throws { - try self.attributes?.forEach { - try validate($0.key, name: "attributes.key", parent: name, max: 128) - try validate($0.key, name: "attributes.key", parent: name, min: 1) - try validate($0.value, name: "attributes[\"\($0.key)\"]", parent: name, max: 800) - } - try self.validate(self.placementName, name: "placementName", parent: name, max: 128) - try self.validate(self.placementName, name: "placementName", parent: name, min: 1) - try self.validate(self.placementName, name: "placementName", parent: name, pattern: "^[a-zA-Z0-9_-]+$") - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - } - - private enum CodingKeys: String, CodingKey { - case attributes = "attributes" - case placementName = "placementName" - } - } - - public struct CreatePlacementResponse: AWSDecodableShape { - public init() {} - } - - public struct CreateProjectRequest: AWSEncodableShape { - /// An optional description for the project. - public let description: String? - /// The schema defining the placement to be created. A placement template defines placement default attributes and device templates. You cannot add or remove device templates after the project has been created. However, you can update callbackOverrides for the device templates using the UpdateProject API. - public let placementTemplate: PlacementTemplate? - /// The name of the project to create. - public let projectName: String - /// Optional tags (metadata key/value pairs) to be associated with the project. For example, { {"key1": "value1", "key2": "value2"} }. For more information, see AWS Tagging Strategies. - public let tags: [String: String]? - - @inlinable - public init(description: String? = nil, placementTemplate: PlacementTemplate? = nil, projectName: String, tags: [String: String]? 
= nil) { - self.description = description - self.placementTemplate = placementTemplate - self.projectName = projectName - self.tags = tags - } - - public func validate(name: String) throws { - try self.validate(self.description, name: "description", parent: name, max: 500) - try self.placementTemplate?.validate(name: "\(name).placementTemplate") - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - try self.tags?.forEach { - try validate($0.key, name: "tags.key", parent: name, max: 128) - try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") - try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) - } - try self.validate(self.tags, name: "tags", parent: name, max: 50) - try self.validate(self.tags, name: "tags", parent: name, min: 1) - } - - private enum CodingKeys: String, CodingKey { - case description = "description" - case placementTemplate = "placementTemplate" - case projectName = "projectName" - case tags = "tags" - } - } - - public struct CreateProjectResponse: AWSDecodableShape { - public init() {} - } - - public struct DeletePlacementRequest: AWSEncodableShape { - /// The name of the empty placement to delete. - public let placementName: String - /// The project containing the empty placement to delete. - public let projectName: String - - @inlinable - public init(placementName: String, projectName: String) { - self.placementName = placementName - self.projectName = projectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.placementName, key: "placementName") - request.encodePath(self.projectName, key: "projectName") - } - - public func validate(name: String) throws { - try self.validate(self.placementName, name: "placementName", parent: name, max: 128) - try self.validate(self.placementName, name: "placementName", parent: name, min: 1) - try self.validate(self.placementName, name: "placementName", parent: name, pattern: "^[a-zA-Z0-9_-]+$") - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct DeletePlacementResponse: AWSDecodableShape { - public init() {} - } - - public struct DeleteProjectRequest: AWSEncodableShape { - /// The name of the empty project to delete. - public let projectName: String - - @inlinable - public init(projectName: String) { - self.projectName = projectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.projectName, key: "projectName") - } - - public func validate(name: String) throws { - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct DeleteProjectResponse: AWSDecodableShape { - public init() {} - } - - public struct DescribePlacementRequest: AWSEncodableShape { - /// The name of the placement within a project. - public let placementName: String - /// The project containing the placement to be described. - public let projectName: String - - @inlinable - public init(placementName: String, projectName: String) { - self.placementName = placementName - self.projectName = projectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.placementName, key: "placementName") - request.encodePath(self.projectName, key: "projectName") - } - - public func validate(name: String) throws { - try self.validate(self.placementName, name: "placementName", parent: name, max: 128) - try self.validate(self.placementName, name: "placementName", parent: name, min: 1) - try self.validate(self.placementName, name: "placementName", parent: name, pattern: "^[a-zA-Z0-9_-]+$") - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct DescribePlacementResponse: AWSDecodableShape { - /// An object describing the placement. - public let placement: PlacementDescription - - @inlinable - public init(placement: PlacementDescription) { - self.placement = placement - } - - private enum CodingKeys: String, CodingKey { - case placement = "placement" - } - } - - public struct DescribeProjectRequest: AWSEncodableShape { - /// The name of the project to be described. - public let projectName: String - - @inlinable - public init(projectName: String) { - self.projectName = projectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.projectName, key: "projectName") - } - - public func validate(name: String) throws { - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct DescribeProjectResponse: AWSDecodableShape { - /// An object describing the project. 
- public let project: ProjectDescription - - @inlinable - public init(project: ProjectDescription) { - self.project = project - } - - private enum CodingKeys: String, CodingKey { - case project = "project" - } - } - - public struct DeviceTemplate: AWSEncodableShape & AWSDecodableShape { - /// An optional Lambda function to invoke instead of the default Lambda function provided by the placement template. - public let callbackOverrides: [String: String]? - /// The device type, which currently must be "button". - public let deviceType: String? - - @inlinable - public init(callbackOverrides: [String: String]? = nil, deviceType: String? = nil) { - self.callbackOverrides = callbackOverrides - self.deviceType = deviceType - } - - public func validate(name: String) throws { - try self.callbackOverrides?.forEach { - try validate($0.key, name: "callbackOverrides.key", parent: name, max: 128) - try validate($0.key, name: "callbackOverrides.key", parent: name, min: 1) - try validate($0.value, name: "callbackOverrides[\"\($0.key)\"]", parent: name, max: 200) - } - try self.validate(self.deviceType, name: "deviceType", parent: name, max: 128) - } - - private enum CodingKeys: String, CodingKey { - case callbackOverrides = "callbackOverrides" - case deviceType = "deviceType" - } - } - - public struct DisassociateDeviceFromPlacementRequest: AWSEncodableShape { - /// The device template name of the device to be removed from the placement. - public let deviceTemplateName: String - /// The name of the placement that the device should be removed from. - public let placementName: String - /// The name of the project that contains the placement. - public let projectName: String - - @inlinable - public init(deviceTemplateName: String, placementName: String, projectName: String) { - self.deviceTemplateName = deviceTemplateName - self.placementName = placementName - self.projectName = projectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.deviceTemplateName, key: "deviceTemplateName") - request.encodePath(self.placementName, key: "placementName") - request.encodePath(self.projectName, key: "projectName") - } - - public func validate(name: String) throws { - try self.validate(self.deviceTemplateName, name: "deviceTemplateName", parent: name, max: 128) - try self.validate(self.deviceTemplateName, name: "deviceTemplateName", parent: name, min: 1) - try self.validate(self.deviceTemplateName, name: "deviceTemplateName", parent: name, pattern: "^[a-zA-Z0-9_-]+$") - try self.validate(self.placementName, name: "placementName", parent: name, max: 128) - try self.validate(self.placementName, name: "placementName", parent: name, min: 1) - try self.validate(self.placementName, name: "placementName", parent: name, pattern: "^[a-zA-Z0-9_-]+$") - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct DisassociateDeviceFromPlacementResponse: AWSDecodableShape { - public init() {} - } - - public struct GetDevicesInPlacementRequest: AWSEncodableShape { - /// The name of the placement to get the devices from. - public let placementName: String - /// The name of the project containing the placement. 
- public let projectName: String - - @inlinable - public init(placementName: String, projectName: String) { - self.placementName = placementName - self.projectName = projectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.placementName, key: "placementName") - request.encodePath(self.projectName, key: "projectName") - } - - public func validate(name: String) throws { - try self.validate(self.placementName, name: "placementName", parent: name, max: 128) - try self.validate(self.placementName, name: "placementName", parent: name, min: 1) - try self.validate(self.placementName, name: "placementName", parent: name, pattern: "^[a-zA-Z0-9_-]+$") - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct GetDevicesInPlacementResponse: AWSDecodableShape { - /// An object containing the devices (zero or more) within the placement. - public let devices: [String: String] - - @inlinable - public init(devices: [String: String]) { - self.devices = devices - } - - private enum CodingKeys: String, CodingKey { - case devices = "devices" - } - } - - public struct ListPlacementsRequest: AWSEncodableShape { - /// The maximum number of results to return per request. If not set, a default value of 100 is used. - public let maxResults: Int? - /// The token to retrieve the next set of results. - public let nextToken: String? - /// The project containing the placements to be listed. - public let projectName: String - - @inlinable - public init(maxResults: Int? = nil, nextToken: String? = nil, projectName: String) { - self.maxResults = maxResults - self.nextToken = nextToken - self.projectName = projectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodeQuery(self.maxResults, key: "maxResults") - request.encodeQuery(self.nextToken, key: "nextToken") - request.encodePath(self.projectName, key: "projectName") - } - - public func validate(name: String) throws { - try self.validate(self.maxResults, name: "maxResults", parent: name, max: 250) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListPlacementsResponse: AWSDecodableShape { - /// The token used to retrieve the next set of results - will be effectively empty if there are no further results. - public let nextToken: String? - /// An object listing the requested placements. - public let placements: [PlacementSummary] - - @inlinable - public init(nextToken: String? 
= nil, placements: [PlacementSummary]) { - self.nextToken = nextToken - self.placements = placements - } - - private enum CodingKeys: String, CodingKey { - case nextToken = "nextToken" - case placements = "placements" - } - } - - public struct ListProjectsRequest: AWSEncodableShape { - /// The maximum number of results to return per request. If not set, a default value of 100 is used. - public let maxResults: Int? - /// The token to retrieve the next set of results. - public let nextToken: String? - - @inlinable - public init(maxResults: Int? = nil, nextToken: String? = nil) { - self.maxResults = maxResults - self.nextToken = nextToken - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodeQuery(self.maxResults, key: "maxResults") - request.encodeQuery(self.nextToken, key: "nextToken") - } - - public func validate(name: String) throws { - try self.validate(self.maxResults, name: "maxResults", parent: name, max: 250) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListProjectsResponse: AWSDecodableShape { - /// The token used to retrieve the next set of results - will be effectively empty if there are no further results. - public let nextToken: String? - /// An object containing the list of projects. - public let projects: [ProjectSummary] - - @inlinable - public init(nextToken: String? = nil, projects: [ProjectSummary]) { - self.nextToken = nextToken - self.projects = projects - } - - private enum CodingKeys: String, CodingKey { - case nextToken = "nextToken" - case projects = "projects" - } - } - - public struct ListTagsForResourceRequest: AWSEncodableShape { - /// The ARN of the resource whose tags you want to list. - public let resourceArn: String - - @inlinable - public init(resourceArn: String) { - self.resourceArn = resourceArn - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.resourceArn, key: "resourceArn") - } - - public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:iot1click:[A-Za-z0-9_/.-]{0,63}:\\d+:projects/[0-9A-Za-z_-]{1,128}$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListTagsForResourceResponse: AWSDecodableShape { - /// The tags (metadata key/value pairs) which you have assigned to the resource. - public let tags: [String: String]? - - @inlinable - public init(tags: [String: String]? = nil) { - self.tags = tags - } - - private enum CodingKeys: String, CodingKey { - case tags = "tags" - } - } - - public struct PlacementDescription: AWSDecodableShape { - /// The user-defined attributes associated with the placement. - public let attributes: [String: String] - /// The date when the placement was initially created, in UNIX epoch time format. - public let createdDate: Date - /// The name of the placement. - public let placementName: String - /// The name of the project containing the placement. - public let projectName: String - /// The date when the placement was last updated, in UNIX epoch time format. 
If the placement was not updated, then createdDate and updatedDate are the same. - public let updatedDate: Date - - @inlinable - public init(attributes: [String: String], createdDate: Date, placementName: String, projectName: String, updatedDate: Date) { - self.attributes = attributes - self.createdDate = createdDate - self.placementName = placementName - self.projectName = projectName - self.updatedDate = updatedDate - } - - private enum CodingKeys: String, CodingKey { - case attributes = "attributes" - case createdDate = "createdDate" - case placementName = "placementName" - case projectName = "projectName" - case updatedDate = "updatedDate" - } - } - - public struct PlacementSummary: AWSDecodableShape { - /// The date when the placement was originally created, in UNIX epoch time format. - public let createdDate: Date - /// The name of the placement being summarized. - public let placementName: String - /// The name of the project containing the placement. - public let projectName: String - /// The date when the placement was last updated, in UNIX epoch time format. If the placement was not updated, then createdDate and updatedDate are the same. - public let updatedDate: Date - - @inlinable - public init(createdDate: Date, placementName: String, projectName: String, updatedDate: Date) { - self.createdDate = createdDate - self.placementName = placementName - self.projectName = projectName - self.updatedDate = updatedDate - } - - private enum CodingKeys: String, CodingKey { - case createdDate = "createdDate" - case placementName = "placementName" - case projectName = "projectName" - case updatedDate = "updatedDate" - } - } - - public struct PlacementTemplate: AWSEncodableShape & AWSDecodableShape { - /// The default attributes (key/value pairs) to be applied to all placements using this template. - public let defaultAttributes: [String: String]? - /// An object specifying the DeviceTemplate for all placements using this (PlacementTemplate) template. - public let deviceTemplates: [String: DeviceTemplate]? - - @inlinable - public init(defaultAttributes: [String: String]? = nil, deviceTemplates: [String: DeviceTemplate]? = nil) { - self.defaultAttributes = defaultAttributes - self.deviceTemplates = deviceTemplates - } - - public func validate(name: String) throws { - try self.defaultAttributes?.forEach { - try validate($0.key, name: "defaultAttributes.key", parent: name, max: 128) - try validate($0.key, name: "defaultAttributes.key", parent: name, min: 1) - try validate($0.value, name: "defaultAttributes[\"\($0.key)\"]", parent: name, max: 800) - } - try self.deviceTemplates?.forEach { - try validate($0.key, name: "deviceTemplates.key", parent: name, max: 128) - try validate($0.key, name: "deviceTemplates.key", parent: name, min: 1) - try validate($0.key, name: "deviceTemplates.key", parent: name, pattern: "^[a-zA-Z0-9_-]+$") - try $0.value.validate(name: "\(name).deviceTemplates[\"\($0.key)\"]") - } - } - - private enum CodingKeys: String, CodingKey { - case defaultAttributes = "defaultAttributes" - case deviceTemplates = "deviceTemplates" - } - } - - public struct ProjectDescription: AWSDecodableShape { - /// The ARN of the project. - public let arn: String? - /// The date when the project was originally created, in UNIX epoch time format. - public let createdDate: Date - /// The description of the project. - public let description: String? - /// An object describing the project's placement specifications. - public let placementTemplate: PlacementTemplate? 
- /// The name of the project about which to obtain information. - public let projectName: String - /// The tags (metadata key/value pairs) associated with the project. - public let tags: [String: String]? - /// The date when the project was last updated, in UNIX epoch time format. If the project was not updated, then createdDate and updatedDate are the same. - public let updatedDate: Date - - @inlinable - public init(arn: String? = nil, createdDate: Date, description: String? = nil, placementTemplate: PlacementTemplate? = nil, projectName: String, tags: [String: String]? = nil, updatedDate: Date) { - self.arn = arn - self.createdDate = createdDate - self.description = description - self.placementTemplate = placementTemplate - self.projectName = projectName - self.tags = tags - self.updatedDate = updatedDate - } - - private enum CodingKeys: String, CodingKey { - case arn = "arn" - case createdDate = "createdDate" - case description = "description" - case placementTemplate = "placementTemplate" - case projectName = "projectName" - case tags = "tags" - case updatedDate = "updatedDate" - } - } - - public struct ProjectSummary: AWSDecodableShape { - /// The ARN of the project. - public let arn: String? - /// The date when the project was originally created, in UNIX epoch time format. - public let createdDate: Date - /// The name of the project being summarized. - public let projectName: String - /// The tags (metadata key/value pairs) associated with the project. - public let tags: [String: String]? - /// The date when the project was last updated, in UNIX epoch time format. If the project was not updated, then createdDate and updatedDate are the same. - public let updatedDate: Date - - @inlinable - public init(arn: String? = nil, createdDate: Date, projectName: String, tags: [String: String]? = nil, updatedDate: Date) { - self.arn = arn - self.createdDate = createdDate - self.projectName = projectName - self.tags = tags - self.updatedDate = updatedDate - } - - private enum CodingKeys: String, CodingKey { - case arn = "arn" - case createdDate = "createdDate" - case projectName = "projectName" - case tags = "tags" - case updatedDate = "updatedDate" - } - } - - public struct TagResourceRequest: AWSEncodableShape { - /// The ARN of the resource for which tag(s) should be added or modified. - public let resourceArn: String - /// The new or modified tag(s) for the resource. See AWS IoT 1-Click Service Limits for the maximum number of tags allowed per resource. - public let tags: [String: String] - - @inlinable - public init(resourceArn: String, tags: [String: String]) { - self.resourceArn = resourceArn - self.tags = tags - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.resourceArn, key: "resourceArn") - try container.encode(self.tags, forKey: .tags) - } - - public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:iot1click:[A-Za-z0-9_/.-]{0,63}:\\d+:projects/[0-9A-Za-z_-]{1,128}$") - try self.tags.forEach { - try validate($0.key, name: "tags.key", parent: name, max: 128) - try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") - try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) - } - try self.validate(self.tags, name: "tags", parent: name, max: 50) - try self.validate(self.tags, name: "tags", parent: name, min: 1) - } - - private enum CodingKeys: String, CodingKey { - case tags = "tags" - } - } - - public struct TagResourceResponse: AWSDecodableShape { - public init() {} - } - - public struct UntagResourceRequest: AWSEncodableShape { - /// The ARN of the resource whose tag you want to remove. - public let resourceArn: String - /// The keys of those tags which you want to remove. - public let tagKeys: [String] - - @inlinable - public init(resourceArn: String, tagKeys: [String]) { - self.resourceArn = resourceArn - self.tagKeys = tagKeys - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.resourceArn, key: "resourceArn") - request.encodeQuery(self.tagKeys, key: "tagKeys") - } - - public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:iot1click:[A-Za-z0-9_/.-]{0,63}:\\d+:projects/[0-9A-Za-z_-]{1,128}$") - try self.tagKeys.forEach { - try validate($0, name: "tagKeys[]", parent: name, max: 128) - try validate($0, name: "tagKeys[]", parent: name, min: 1) - try validate($0, name: "tagKeys[]", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") - } - try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 50) - try self.validate(self.tagKeys, name: "tagKeys", parent: name, min: 1) - } - - private enum CodingKeys: CodingKey {} - } - - public struct UntagResourceResponse: AWSDecodableShape { - public init() {} - } - - public struct UpdatePlacementRequest: AWSEncodableShape { - /// The user-defined object of attributes used to update the placement. The maximum number of key/value pairs is 50. - public let attributes: [String: String]? - /// The name of the placement to update. - public let placementName: String - /// The name of the project containing the placement to be updated. - public let projectName: String - - @inlinable - public init(attributes: [String: String]? = nil, placementName: String, projectName: String) { - self.attributes = attributes - self.placementName = placementName - self.projectName = projectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encodeIfPresent(self.attributes, forKey: .attributes) - request.encodePath(self.placementName, key: "placementName") - request.encodePath(self.projectName, key: "projectName") - } - - public func validate(name: String) throws { - try self.attributes?.forEach { - try validate($0.key, name: "attributes.key", parent: name, max: 128) - try validate($0.key, name: "attributes.key", parent: name, min: 1) - try validate($0.value, name: "attributes[\"\($0.key)\"]", parent: name, max: 800) - } - try self.validate(self.placementName, name: "placementName", parent: name, max: 128) - try self.validate(self.placementName, name: "placementName", parent: name, min: 1) - try self.validate(self.placementName, name: "placementName", parent: name, pattern: "^[a-zA-Z0-9_-]+$") - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - } - - private enum CodingKeys: String, CodingKey { - case attributes = "attributes" - } - } - - public struct UpdatePlacementResponse: AWSDecodableShape { - public init() {} - } - - public struct UpdateProjectRequest: AWSEncodableShape { - /// An optional user-defined description for the project. - public let description: String? - /// An object defining the project update. Once a project has been created, you cannot add device template names to the project. However, for a given placementTemplate, you can update the associated callbackOverrides for the device definition using this API. - public let placementTemplate: PlacementTemplate? - /// The name of the project to be updated. - public let projectName: String - - @inlinable - public init(description: String? = nil, placementTemplate: PlacementTemplate? = nil, projectName: String) { - self.description = description - self.placementTemplate = placementTemplate - self.projectName = projectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encodeIfPresent(self.description, forKey: .description) - try container.encodeIfPresent(self.placementTemplate, forKey: .placementTemplate) - request.encodePath(self.projectName, key: "projectName") - } - - public func validate(name: String) throws { - try self.validate(self.description, name: "description", parent: name, max: 500) - try self.placementTemplate?.validate(name: "\(name).placementTemplate") - try self.validate(self.projectName, name: "projectName", parent: name, max: 128) - try self.validate(self.projectName, name: "projectName", parent: name, min: 1) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[0-9A-Za-z_-]+$") - } - - private enum CodingKeys: String, CodingKey { - case description = "description" - case placementTemplate = "placementTemplate" - } - } - - public struct UpdateProjectResponse: AWSDecodableShape { - public init() {} - } -} - -// MARK: - Errors - -/// Error enum for IoT1ClickProjects -public struct IoT1ClickProjectsErrorType: AWSErrorType { - enum Code: String { - case internalFailureException = "InternalFailureException" - case invalidRequestException = "InvalidRequestException" - case resourceConflictException = "ResourceConflictException" - case resourceNotFoundException = "ResourceNotFoundException" - case tooManyRequestsException = "TooManyRequestsException" - } - - private let error: Code - public let context: AWSErrorContext? - - /// initialize IoT1ClickProjects - public init?(errorCode: String, context: AWSErrorContext) { - guard let error = Code(rawValue: errorCode) else { return nil } - self.error = error - self.context = context - } - - internal init(_ error: Code) { - self.error = error - self.context = nil - } - - /// return error code string - public var errorCode: String { self.error.rawValue } - - public static var internalFailureException: Self { .init(.internalFailureException) } - public static var invalidRequestException: Self { .init(.invalidRequestException) } - public static var resourceConflictException: Self { .init(.resourceConflictException) } - public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } - public static var tooManyRequestsException: Self { .init(.tooManyRequestsException) } -} - -extension IoT1ClickProjectsErrorType: Equatable { - public static func == (lhs: IoT1ClickProjectsErrorType, rhs: IoT1ClickProjectsErrorType) -> Bool { - lhs.error == rhs.error - } -} - -extension IoT1ClickProjectsErrorType: CustomStringConvertible { - public var description: String { - return "\(self.error.rawValue): \(self.message ?? 
"")" - } -} diff --git a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift index 542f089439..a86eff398d 100644 --- a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift +++ b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift @@ -66,6 +66,7 @@ public struct IoTFleetWise: AWSService { serviceProtocol: .json(version: "1.0"), apiVersion: "2021-06-17", endpoint: endpoint, + variantEndpoints: Self.variantEndpoints, errorType: IoTFleetWiseErrorType.self, middleware: middleware, timeout: timeout, @@ -77,6 +78,14 @@ public struct IoTFleetWise: AWSService { + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "ap-south-1": "iotfleetwise.ap-south-1.api.aws", + "eu-central-1": "iotfleetwise.eu-central-1.api.aws", + "us-east-1": "iotfleetwise.us-east-1.api.aws" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/IoTSecureTunneling/IoTSecureTunneling_api.swift b/Sources/Soto/Services/IoTSecureTunneling/IoTSecureTunneling_api.swift index aff7bdb4be..df19be639b 100644 --- a/Sources/Soto/Services/IoTSecureTunneling/IoTSecureTunneling_api.swift +++ b/Sources/Soto/Services/IoTSecureTunneling/IoTSecureTunneling_api.swift @@ -83,22 +83,42 @@ public struct IoTSecureTunneling: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "ap-east-1": "api.iot-tunneling.ap-east-1.api.aws", + "ap-northeast-1": "api.iot-tunneling.ap-northeast-1.api.aws", + "ap-northeast-2": "api.iot-tunneling.ap-northeast-2.api.aws", + "ap-south-1": "api.iot-tunneling.ap-south-1.api.aws", + "ap-southeast-1": "api.iot-tunneling.ap-southeast-1.api.aws", + "ap-southeast-2": "api.iot-tunneling.ap-southeast-2.api.aws", + "ca-central-1": "api.iot-tunneling.ca-central-1.api.aws", + "cn-north-1": "api.iot-tunneling.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "api.iot-tunneling.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "api.iot-tunneling.eu-central-1.api.aws", + "eu-north-1": "api.iot-tunneling.eu-north-1.api.aws", + "eu-west-1": "api.iot-tunneling.eu-west-1.api.aws", + "eu-west-2": "api.iot-tunneling.eu-west-2.api.aws", + "eu-west-3": "api.iot-tunneling.eu-west-3.api.aws", + "me-central-1": "api.iot-tunneling.me-central-1.api.aws", + "me-south-1": "api.iot-tunneling.me-south-1.api.aws", + "sa-east-1": "api.iot-tunneling.sa-east-1.api.aws", + "us-east-1": "api.iot-tunneling.us-east-1.api.aws", + "us-east-2": "api.iot-tunneling.us-east-2.api.aws", + "us-gov-east-1": "api.iot-tunneling.us-gov-east-1.api.aws", + "us-gov-west-1": "api.iot-tunneling.us-gov-west-1.api.aws", + "us-west-1": "api.iot-tunneling.us-west-1.api.aws", + "us-west-2": "api.iot-tunneling.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "api.iot-tunneling-fips.ca-central-1.api.aws", + "us-east-1": "api.iot-tunneling-fips.us-east-1.api.aws", + "us-east-2": "api.iot-tunneling-fips.us-east-2.api.aws", + "us-gov-east-1": "api.iot-tunneling-fips.us-gov-east-1.api.aws", + "us-gov-west-1": "api.iot-tunneling-fips.us-gov-west-1.api.aws", + "us-west-1": "api.iot-tunneling-fips.us-west-1.api.aws", + "us-west-2": "api.iot-tunneling-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ - "ap-east-1": "api.tunneling.iot-fips.ap-east-1.amazonaws.com", - "ap-northeast-1": 
"api.tunneling.iot-fips.ap-northeast-1.amazonaws.com", - "ap-northeast-2": "api.tunneling.iot-fips.ap-northeast-2.amazonaws.com", - "ap-south-1": "api.tunneling.iot-fips.ap-south-1.amazonaws.com", - "ap-southeast-1": "api.tunneling.iot-fips.ap-southeast-1.amazonaws.com", - "ap-southeast-2": "api.tunneling.iot-fips.ap-southeast-2.amazonaws.com", "ca-central-1": "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - "eu-central-1": "api.tunneling.iot-fips.eu-central-1.amazonaws.com", - "eu-north-1": "api.tunneling.iot-fips.eu-north-1.amazonaws.com", - "eu-west-1": "api.tunneling.iot-fips.eu-west-1.amazonaws.com", - "eu-west-2": "api.tunneling.iot-fips.eu-west-2.amazonaws.com", - "eu-west-3": "api.tunneling.iot-fips.eu-west-3.amazonaws.com", - "me-central-1": "api.tunneling.iot-fips.me-central-1.amazonaws.com", - "me-south-1": "api.tunneling.iot-fips.me-south-1.amazonaws.com", - "sa-east-1": "api.tunneling.iot-fips.sa-east-1.amazonaws.com", "us-east-1": "api.tunneling.iot-fips.us-east-1.amazonaws.com", "us-east-2": "api.tunneling.iot-fips.us-east-2.amazonaws.com", "us-gov-east-1": "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", diff --git a/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_api.swift b/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_api.swift index c3ffc77d17..4bbfdead2b 100644 --- a/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_api.swift +++ b/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_api.swift @@ -362,14 +362,17 @@ public struct IoTSiteWise: AWSService { /// Sends a list of asset property values to IoT SiteWise. Each value is a timestamp-quality-value (TQV) data point. For more information, see Ingesting data using the API in the IoT SiteWise User Guide. To identify an asset property, you must specify one of the following: The assetId and propertyId of an asset property. A propertyAlias, which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, see UpdateAssetProperty. With respect to Unix epoch time, IoT SiteWise accepts only TQVs that have a timestamp of no more than 7 days in the past and no more than 10 minutes in the future. IoT SiteWise rejects timestamps outside of the inclusive range of [-7 days, +10 minutes] and returns a TimestampOutOfRangeException error. For each asset property, IoT SiteWise overwrites TQVs with duplicate timestamps unless the newer TQV has a different quality. For example, if you store a TQV {T1, GOOD, V1}, then storing {T1, GOOD, V2} replaces the existing TQV. IoT SiteWise authorizes access to each BatchPutAssetPropertyValue entry individually. For more information, see BatchPutAssetPropertyValue authorization in the IoT SiteWise User Guide. /// /// Parameters: + /// - enablePartialEntryProcessing: This setting enables partial ingestion at entry-level. If set to true, we ingest all TQVs not resulting in an error. If set to false, an invalid TQV fails ingestion of the entire entry that contains it. /// - entries: The list of asset property value entries for the batch put request. You can specify up to 10 entries per request. /// - logger: Logger use during operation @inlinable public func batchPutAssetPropertyValue( + enablePartialEntryProcessing: Bool? 
= nil, entries: [PutAssetPropertyValueEntry], logger: Logger = AWSClient.loggingDisabled ) async throws -> BatchPutAssetPropertyValueResponse { let input = BatchPutAssetPropertyValueRequest( + enablePartialEntryProcessing: enablePartialEntryProcessing, entries: entries ) return try await self.batchPutAssetPropertyValue(input, logger: logger) @@ -2990,6 +2993,7 @@ public struct IoTSiteWise: AWSService { /// Configures storage settings for IoT SiteWise. /// /// Parameters: + /// - disallowIngestNullNaN: Describes the configuration for ingesting NULL and NaN data. By default the feature is allowed. The feature is disallowed if the value is true. /// - disassociatedDataStorage: Contains the storage configuration for time series (data streams) that aren't associated with asset properties. The disassociatedDataStorage can be one of the following values: ENABLED – IoT SiteWise accepts time series that aren't associated with asset properties. After the disassociatedDataStorage is enabled, you can't disable it. DISABLED – IoT SiteWise doesn't accept time series (data streams) that aren't associated with asset properties. For more information, see Data streams in the IoT SiteWise User Guide. /// - multiLayerStorage: Identifies a storage destination. If you specified MULTI_LAYER_STORAGE for the storage type, you must specify a MultiLayerStorage object. /// - retentionPeriod: @@ -2999,6 +3003,7 @@ public struct IoTSiteWise: AWSService { /// - logger: Logger use during operation @inlinable public func putStorageConfiguration( + disallowIngestNullNaN: Bool? = nil, disassociatedDataStorage: DisassociatedDataStorageState? = nil, multiLayerStorage: MultiLayerStorage? = nil, retentionPeriod: RetentionPeriod? = nil, @@ -3008,6 +3013,7 @@ public struct IoTSiteWise: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> PutStorageConfigurationResponse { let input = PutStorageConfigurationRequest( + disallowIngestNullNaN: disallowIngestNullNaN, disassociatedDataStorage: disassociatedDataStorage, multiLayerStorage: multiLayerStorage, retentionPeriod: retentionPeriod, diff --git a/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_shapes.swift b/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_shapes.swift index 4460d5c2c1..7c0ffc1dd2 100644 --- a/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_shapes.swift +++ b/Sources/Soto/Services/IoTSiteWise/IoTSiteWise_shapes.swift @@ -321,6 +321,15 @@ extension IoTSiteWise { public var description: String { return self.rawValue } } + public enum RawValueType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case boolean = "B" + case double = "D" + case integer = "I" + case string = "S" + case unknown = "U" + public var description: String { return self.rawValue } + } + public enum ResourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case portal = "PORTAL" case project = "PROJECT" @@ -2397,11 +2406,14 @@ extension IoTSiteWise { } public struct BatchPutAssetPropertyValueRequest: AWSEncodableShape { + /// This setting enables partial ingestion at entry-level. If set to true, we ingest all TQVs not resulting in an error. If set to false, an invalid TQV fails ingestion of the entire entry that contains it. + public let enablePartialEntryProcessing: Bool? /// The list of asset property value entries for the batch put request. You can specify up to 10 entries per request. 
public let entries: [PutAssetPropertyValueEntry] @inlinable - public init(entries: [PutAssetPropertyValueEntry]) { + public init(enablePartialEntryProcessing: Bool? = nil, entries: [PutAssetPropertyValueEntry]) { + self.enablePartialEntryProcessing = enablePartialEntryProcessing self.entries = entries } @@ -2412,6 +2424,7 @@ extension IoTSiteWise { } private enum CodingKeys: String, CodingKey { + case enablePartialEntryProcessing = "enablePartialEntryProcessing" case entries = "entries" } } @@ -5235,6 +5248,8 @@ extension IoTSiteWise { public struct DescribeStorageConfigurationResponse: AWSDecodableShape { public let configurationStatus: ConfigurationStatus + /// Describes the configuration for ingesting NULL and NaN data. By default the feature is allowed. The feature is disallowed if the value is true. + public let disallowIngestNullNaN: Bool? /// Contains the storage configuration for time series (data streams) that aren't associated with asset properties. The disassociatedDataStorage can be one of the following values: ENABLED – IoT SiteWise accepts time series that aren't associated with asset properties. After the disassociatedDataStorage is enabled, you can't disable it. DISABLED – IoT SiteWise doesn't accept time series (data streams) that aren't associated with asset properties. For more information, see Data streams in the IoT SiteWise User Guide. public let disassociatedDataStorage: DisassociatedDataStorageState? /// The date the storage configuration was last updated, in Unix epoch time. @@ -5251,8 +5266,9 @@ extension IoTSiteWise { public let warmTierRetentionPeriod: WarmTierRetentionPeriod? @inlinable - public init(configurationStatus: ConfigurationStatus, disassociatedDataStorage: DisassociatedDataStorageState? = nil, lastUpdateDate: Date? = nil, multiLayerStorage: MultiLayerStorage? = nil, retentionPeriod: RetentionPeriod? = nil, storageType: StorageType, warmTier: WarmTierState? = nil, warmTierRetentionPeriod: WarmTierRetentionPeriod? = nil) { + public init(configurationStatus: ConfigurationStatus, disallowIngestNullNaN: Bool? = nil, disassociatedDataStorage: DisassociatedDataStorageState? = nil, lastUpdateDate: Date? = nil, multiLayerStorage: MultiLayerStorage? = nil, retentionPeriod: RetentionPeriod? = nil, storageType: StorageType, warmTier: WarmTierState? = nil, warmTierRetentionPeriod: WarmTierRetentionPeriod? = nil) { self.configurationStatus = configurationStatus + self.disallowIngestNullNaN = disallowIngestNullNaN self.disassociatedDataStorage = disassociatedDataStorage self.lastUpdateDate = lastUpdateDate self.multiLayerStorage = multiLayerStorage @@ -5264,6 +5280,7 @@ extension IoTSiteWise { private enum CodingKeys: String, CodingKey { case configurationStatus = "configurationStatus" + case disallowIngestNullNaN = "disallowIngestNullNaN" case disassociatedDataStorage = "disassociatedDataStorage" case lastUpdateDate = "lastUpdateDate" case multiLayerStorage = "multiLayerStorage" @@ -8020,6 +8037,20 @@ extension IoTSiteWise { } } + public struct PropertyValueNullValue: AWSEncodableShape & AWSDecodableShape { + /// The type of null asset property data. + public let valueType: RawValueType + + @inlinable + public init(valueType: RawValueType) { + self.valueType = valueType + } + + private enum CodingKeys: String, CodingKey { + case valueType = "valueType" + } + } + public struct PutAssetPropertyValueEntry: AWSEncodableShape { /// The ID of the asset to update. public let assetId: String? 
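To make the IoTSiteWise changes above concrete, here is a minimal usage sketch (not part of the diff) of the new entry-level partial-ingestion flag together with the typed null variant. The region, entry ID, property alias, and timestamps are placeholder assumptions, and the no-argument AWSClient() initializer is assumed from current soto-core; the batchPutAssetPropertyValue and Variant signatures are taken from the hunks above.

import Foundation
import SotoIoTSiteWise

let client = AWSClient()
let siteWise = IoTSiteWise(client: client, region: .useast1)

let now = Int64(Date().timeIntervalSince1970)
// An ordinary TQV with GOOD quality.
let temperature = AssetPropertyValue(
    quality: .good,
    timestamp: TimeInNanos(timeInSeconds: now),
    value: Variant(doubleValue: 23.5)
)
// New in this diff: a typed null TQV; null values carry BAD or UNCERTAIN quality.
let missing = AssetPropertyValue(
    quality: .bad,
    timestamp: TimeInNanos(timeInSeconds: now - 60),
    value: Variant(nullValue: PropertyValueNullValue(valueType: .double))
)
let entry = PutAssetPropertyValueEntry(
    entryId: "entry-1",                                         // placeholder
    propertyAlias: "/company/windfarm/3/turbine/7/temperature", // placeholder alias
    propertyValues: [temperature, missing]
)
// With partial entry processing enabled, TQVs that validate are ingested even
// when other TQVs in the same entry fail; with it disabled, one invalid TQV
// fails the entire entry.
_ = try await siteWise.batchPutAssetPropertyValue(
    enablePartialEntryProcessing: true,
    entries: [entry]
)
try await client.shutdown()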
@@ -8132,6 +8163,8 @@ extension IoTSiteWise { } public struct PutStorageConfigurationRequest: AWSEncodableShape { + /// Describes the configuration for ingesting NULL and NaN data. By default the feature is allowed. The feature is disallowed if the value is true. + public let disallowIngestNullNaN: Bool? /// Contains the storage configuration for time series (data streams) that aren't associated with asset properties. The disassociatedDataStorage can be one of the following values: ENABLED – IoT SiteWise accepts time series that aren't associated with asset properties. After the disassociatedDataStorage is enabled, you can't disable it. DISABLED – IoT SiteWise doesn't accept time series (data streams) that aren't associated with asset properties. For more information, see Data streams in the IoT SiteWise User Guide. public let disassociatedDataStorage: DisassociatedDataStorageState? /// Identifies a storage destination. If you specified MULTI_LAYER_STORAGE for the storage type, you must specify a MultiLayerStorage object. @@ -8145,7 +8178,8 @@ extension IoTSiteWise { public let warmTierRetentionPeriod: WarmTierRetentionPeriod? @inlinable - public init(disassociatedDataStorage: DisassociatedDataStorageState? = nil, multiLayerStorage: MultiLayerStorage? = nil, retentionPeriod: RetentionPeriod? = nil, storageType: StorageType, warmTier: WarmTierState? = nil, warmTierRetentionPeriod: WarmTierRetentionPeriod? = nil) { + public init(disallowIngestNullNaN: Bool? = nil, disassociatedDataStorage: DisassociatedDataStorageState? = nil, multiLayerStorage: MultiLayerStorage? = nil, retentionPeriod: RetentionPeriod? = nil, storageType: StorageType, warmTier: WarmTierState? = nil, warmTierRetentionPeriod: WarmTierRetentionPeriod? = nil) { + self.disallowIngestNullNaN = disallowIngestNullNaN self.disassociatedDataStorage = disassociatedDataStorage self.multiLayerStorage = multiLayerStorage self.retentionPeriod = retentionPeriod @@ -8156,11 +8190,10 @@ extension IoTSiteWise { public func validate(name: String) throws { try self.multiLayerStorage?.validate(name: "\(name).multiLayerStorage") - try self.retentionPeriod?.validate(name: "\(name).retentionPeriod") - try self.warmTierRetentionPeriod?.validate(name: "\(name).warmTierRetentionPeriod") } private enum CodingKeys: String, CodingKey { + case disallowIngestNullNaN = "disallowIngestNullNaN" case disassociatedDataStorage = "disassociatedDataStorage" case multiLayerStorage = "multiLayerStorage" case retentionPeriod = "retentionPeriod" @@ -8172,6 +8205,8 @@ extension IoTSiteWise { public struct PutStorageConfigurationResponse: AWSDecodableShape { public let configurationStatus: ConfigurationStatus + /// Describes the configuration for ingesting NULL and NaN data. By default the feature is allowed. The feature is disallowed if the value is true. + public let disallowIngestNullNaN: Bool? /// Contains the storage configuration for time series (data streams) that aren't associated with asset properties. The disassociatedDataStorage can be one of the following values: ENABLED – IoT SiteWise accepts time series that aren't associated with asset properties. After the disassociatedDataStorage is enabled, you can't disable it. DISABLED – IoT SiteWise doesn't accept time series (data streams) that aren't associated with asset properties. For more information, see Data streams in the IoT SiteWise User Guide. public let disassociatedDataStorage: DisassociatedDataStorageState? /// Contains information about the storage destination. 
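A companion sketch (again not part of the diff) for the new disallowIngestNullNaN storage setting. The putStorageConfiguration parameters and response fields come from the hunks above; the .sitewiseDefaultStorage case name and the zero-argument describeStorageConfiguration() convenience are assumptions based on the existing generated API.

import SotoIoTSiteWise

let client = AWSClient()
let siteWise = IoTSiteWise(client: client, region: .useast1)

// NULL and NaN ingestion is allowed by default; setting the flag to true
// disallows it.
_ = try await siteWise.putStorageConfiguration(
    disallowIngestNullNaN: true,
    storageType: .sitewiseDefaultStorage
)
// The setting is echoed back by DescribeStorageConfiguration.
let storage = try await siteWise.describeStorageConfiguration()
print(storage.disallowIngestNullNaN ?? false)
try await client.shutdown()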
@@ -8185,8 +8220,9 @@ extension IoTSiteWise { public let warmTierRetentionPeriod: WarmTierRetentionPeriod? @inlinable - public init(configurationStatus: ConfigurationStatus, disassociatedDataStorage: DisassociatedDataStorageState? = nil, multiLayerStorage: MultiLayerStorage? = nil, retentionPeriod: RetentionPeriod? = nil, storageType: StorageType, warmTier: WarmTierState? = nil, warmTierRetentionPeriod: WarmTierRetentionPeriod? = nil) { + public init(configurationStatus: ConfigurationStatus, disallowIngestNullNaN: Bool? = nil, disassociatedDataStorage: DisassociatedDataStorageState? = nil, multiLayerStorage: MultiLayerStorage? = nil, retentionPeriod: RetentionPeriod? = nil, storageType: StorageType, warmTier: WarmTierState? = nil, warmTierRetentionPeriod: WarmTierRetentionPeriod? = nil) { self.configurationStatus = configurationStatus + self.disallowIngestNullNaN = disallowIngestNullNaN self.disassociatedDataStorage = disassociatedDataStorage self.multiLayerStorage = multiLayerStorage self.retentionPeriod = retentionPeriod @@ -8197,6 +8233,7 @@ extension IoTSiteWise { private enum CodingKeys: String, CodingKey { case configurationStatus = "configurationStatus" + case disallowIngestNullNaN = "disallowIngestNullNaN" case disassociatedDataStorage = "disassociatedDataStorage" case multiLayerStorage = "multiLayerStorage" case retentionPeriod = "retentionPeriod" @@ -8268,10 +8305,6 @@ extension IoTSiteWise { self.unlimited = unlimited } - public func validate(name: String) throws { - try self.validate(self.numberOfDays, name: "numberOfDays", parent: name, min: 30) - } - private enum CodingKeys: String, CodingKey { case numberOfDays = "numberOfDays" case unlimited = "unlimited" @@ -9462,18 +9495,21 @@ extension IoTSiteWise { public struct Variant: AWSEncodableShape & AWSDecodableShape { /// Asset property data of type Boolean (true or false). public let booleanValue: Bool? - /// Asset property data of type double (floating point number). + /// Asset property data of type double (floating point number). The min value is -10^10. The max value is 10^10. Double.NaN is allowed. public let doubleValue: Double? /// Asset property data of type integer (whole number). public let integerValue: Int? - /// Asset property data of type string (sequence of characters). + /// The type of null asset property data with BAD and UNCERTAIN qualities. + public let nullValue: PropertyValueNullValue? + /// Asset property data of type string (sequence of characters). The allowed pattern: "^$|[^\u0000-\u001F\u007F]+". The max length is 1024. public let stringValue: String? @inlinable - public init(booleanValue: Bool? = nil, doubleValue: Double? = nil, integerValue: Int? = nil, stringValue: String? = nil) { + public init(booleanValue: Bool? = nil, doubleValue: Double? = nil, integerValue: Int? = nil, nullValue: PropertyValueNullValue? = nil, stringValue: String? 
= nil) { self.booleanValue = booleanValue self.doubleValue = doubleValue self.integerValue = integerValue + self.nullValue = nullValue self.stringValue = stringValue } @@ -9481,6 +9517,7 @@ extension IoTSiteWise { case booleanValue = "booleanValue" case doubleValue = "doubleValue" case integerValue = "integerValue" + case nullValue = "nullValue" case stringValue = "stringValue" } } @@ -9497,10 +9534,6 @@ extension IoTSiteWise { self.unlimited = unlimited } - public func validate(name: String) throws { - try self.validate(self.numberOfDays, name: "numberOfDays", parent: name, min: 30) - } - private enum CodingKeys: String, CodingKey { case numberOfDays = "numberOfDays" case unlimited = "unlimited" diff --git a/Sources/Soto/Services/KMS/KMS_api.swift b/Sources/Soto/Services/KMS/KMS_api.swift index 254ed15fc8..7b66a3a8b2 100644 --- a/Sources/Soto/Services/KMS/KMS_api.swift +++ b/Sources/Soto/Services/KMS/KMS_api.swift @@ -94,6 +94,7 @@ public struct KMS: AWSService { "ap-southeast-3": "kms-fips.ap-southeast-3.amazonaws.com", "ap-southeast-4": "kms-fips.ap-southeast-4.amazonaws.com", "ap-southeast-5": "kms-fips.ap-southeast-5.amazonaws.com", + "ap-southeast-7": "kms-fips.ap-southeast-7.amazonaws.com", "ca-central-1": "kms-fips.ca-central-1.amazonaws.com", "ca-west-1": "kms-fips.ca-west-1.amazonaws.com", "eu-central-1": "kms-fips.eu-central-1.amazonaws.com", @@ -107,6 +108,7 @@ public struct KMS: AWSService { "il-central-1": "kms-fips.il-central-1.amazonaws.com", "me-central-1": "kms-fips.me-central-1.amazonaws.com", "me-south-1": "kms-fips.me-south-1.amazonaws.com", + "mx-central-1": "kms-fips.mx-central-1.amazonaws.com", "sa-east-1": "kms-fips.sa-east-1.amazonaws.com", "us-east-1": "kms-fips.us-east-1.amazonaws.com", "us-east-2": "kms-fips.us-east-2.amazonaws.com", diff --git a/Sources/Soto/Services/KafkaConnect/KafkaConnect_api.swift b/Sources/Soto/Services/KafkaConnect/KafkaConnect_api.swift index db88a2e298..7550344de3 100644 --- a/Sources/Soto/Services/KafkaConnect/KafkaConnect_api.swift +++ b/Sources/Soto/Services/KafkaConnect/KafkaConnect_api.swift @@ -340,6 +340,35 @@ public struct KafkaConnect: AWSService { return try await self.describeConnector(input, logger: logger) } + /// Returns information about the specified connector's operations. + @Sendable + @inlinable + public func describeConnectorOperation(_ input: DescribeConnectorOperationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeConnectorOperationResponse { + try await self.client.execute( + operation: "DescribeConnectorOperation", + path: "/v1/connectorOperations/{connectorOperationArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns information about the specified connector's operations. + /// + /// Parameters: + /// - connectorOperationArn: ARN of the connector operation to be described. + /// - logger: Logger use during operation + @inlinable + public func describeConnectorOperation( + connectorOperationArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DescribeConnectorOperationResponse { + let input = DescribeConnectorOperationRequest( + connectorOperationArn: connectorOperationArn + ) + return try await self.describeConnectorOperation(input, logger: logger) + } + /// A summary description of the custom plugin. 
@Sendable @inlinable @@ -398,6 +427,41 @@ public struct KafkaConnect: AWSService { return try await self.describeWorkerConfiguration(input, logger: logger) } + /// Lists information about a connector's operation(s). + @Sendable + @inlinable + public func listConnectorOperations(_ input: ListConnectorOperationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListConnectorOperationsResponse { + try await self.client.execute( + operation: "ListConnectorOperations", + path: "/v1/connectors/{connectorArn}/operations", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists information about a connector's operation(s). + /// + /// Parameters: + /// - connectorArn: The Amazon Resource Name (ARN) of the connector for which to list operations. + /// - maxResults: Maximum number of connector operations to fetch in one get request. + /// - nextToken: If the response is truncated, it includes a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off. + /// - logger: Logger use during operation + @inlinable + public func listConnectorOperations( + connectorArn: String, + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListConnectorOperationsResponse { + let input = ListConnectorOperationsRequest( + connectorArn: connectorArn, + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listConnectorOperations(input, logger: logger) + } + /// Returns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors. @Sendable @inlinable @@ -614,18 +678,21 @@ public struct KafkaConnect: AWSService { /// Parameters: /// - capacity: The target capacity. /// - connectorArn: The Amazon Resource Name (ARN) of the connector that you want to update. + /// - connectorConfiguration: A map of keys to values that represent the configuration for the connector. /// - currentVersion: The current version of the connector that you want to update. /// - logger: Logger use during operation @inlinable public func updateConnector( - capacity: CapacityUpdate, + capacity: CapacityUpdate? = nil, connectorArn: String, + connectorConfiguration: [String: String]? = nil, currentVersion: String, logger: Logger = AWSClient.loggingDisabled ) async throws -> UpdateConnectorResponse { let input = UpdateConnectorRequest( capacity: capacity, connectorArn: connectorArn, + connectorConfiguration: connectorConfiguration, currentVersion: currentVersion ) return try await self.updateConnector(input, logger: logger) @@ -645,6 +712,43 @@ extension KafkaConnect { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension KafkaConnect { + /// Return PaginatorSequence for operation ``listConnectorOperations(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listConnectorOperationsPaginator( + _ input: ListConnectorOperationsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listConnectorOperations, + inputKey: \ListConnectorOperationsRequest.nextToken, + outputKey: \ListConnectorOperationsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listConnectorOperations(_:logger:)``. 
+ /// + /// - Parameters: + /// - connectorArn: The Amazon Resource Name (ARN) of the connector for which to list operations. + /// - maxResults: Maximum number of connector operations to fetch in one get request. + /// - logger: Logger used for logging + @inlinable + public func listConnectorOperationsPaginator( + connectorArn: String, + maxResults: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListConnectorOperationsRequest( + connectorArn: connectorArn, + maxResults: maxResults + ) + return self.listConnectorOperationsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listConnectors(_:logger:)``. /// /// - Parameters: @@ -757,6 +861,17 @@ extension KafkaConnect { } } +extension KafkaConnect.ListConnectorOperationsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> KafkaConnect.ListConnectorOperationsRequest { + return .init( + connectorArn: self.connectorArn, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension KafkaConnect.ListConnectorsRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> KafkaConnect.ListConnectorsRequest { diff --git a/Sources/Soto/Services/KafkaConnect/KafkaConnect_shapes.swift b/Sources/Soto/Services/KafkaConnect/KafkaConnect_shapes.swift index c4efe78682..db7c707823 100644 --- a/Sources/Soto/Services/KafkaConnect/KafkaConnect_shapes.swift +++ b/Sources/Soto/Services/KafkaConnect/KafkaConnect_shapes.swift @@ -26,6 +26,43 @@ import Foundation extension KafkaConnect { // MARK: Enums + public enum ConnectorOperationState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case pending = "PENDING" + case rollbackComplete = "ROLLBACK_COMPLETE" + case rollbackFailed = "ROLLBACK_FAILED" + case rollbackInProgress = "ROLLBACK_IN_PROGRESS" + case updateComplete = "UPDATE_COMPLETE" + case updateFailed = "UPDATE_FAILED" + case updateInProgress = "UPDATE_IN_PROGRESS" + public var description: String { return self.rawValue } + } + + public enum ConnectorOperationStepState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cancelled = "CANCELLED" + case completed = "COMPLETED" + case failed = "FAILED" + case inProgress = "IN_PROGRESS" + case pending = "PENDING" + public var description: String { return self.rawValue } + } + + public enum ConnectorOperationStepType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case finalizeUpdate = "FINALIZE_UPDATE" + case initializeUpdate = "INITIALIZE_UPDATE" + case updateConnectorConfiguration = "UPDATE_CONNECTOR_CONFIGURATION" + case updateWorkerSetting = "UPDATE_WORKER_SETTING" + case validateUpdate = "VALIDATE_UPDATE" + public var description: String { return self.rawValue } + } + + public enum ConnectorOperationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case isolateConnector = "ISOLATE_CONNECTOR" + case restoreConnector = "RESTORE_CONNECTOR" + case updateConnectorConfiguration = "UPDATE_CONNECTOR_CONFIGURATION" + case updateWorkerSetting = "UPDATE_WORKER_SETTING" + public var description: String { return self.rawValue } + } + public enum ConnectorState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case creating = "CREATING" case deleting = "DELETING" @@ -129,12 +166,8 @@ extension KafkaConnect { } public func validate(name: String) throws { - try 
self.validate(self.maxWorkerCount, name: "maxWorkerCount", parent: name, max: 10) - try self.validate(self.maxWorkerCount, name: "maxWorkerCount", parent: name, min: 1) try self.validate(self.mcuCount, name: "mcuCount", parent: name, max: 8) try self.validate(self.mcuCount, name: "mcuCount", parent: name, min: 1) - try self.validate(self.minWorkerCount, name: "minWorkerCount", parent: name, max: 10) - try self.validate(self.minWorkerCount, name: "minWorkerCount", parent: name, min: 1) try self.scaleInPolicy?.validate(name: "\(name).scaleInPolicy") try self.scaleOutPolicy?.validate(name: "\(name).scaleOutPolicy") } @@ -200,12 +233,8 @@ extension KafkaConnect { } public func validate(name: String) throws { - try self.validate(self.maxWorkerCount, name: "maxWorkerCount", parent: name, max: 10) - try self.validate(self.maxWorkerCount, name: "maxWorkerCount", parent: name, min: 1) try self.validate(self.mcuCount, name: "mcuCount", parent: name, max: 8) try self.validate(self.mcuCount, name: "mcuCount", parent: name, min: 1) - try self.validate(self.minWorkerCount, name: "minWorkerCount", parent: name, max: 10) - try self.validate(self.minWorkerCount, name: "minWorkerCount", parent: name, min: 1) try self.scaleInPolicy.validate(name: "\(name).scaleInPolicy") try self.scaleOutPolicy.validate(name: "\(name).scaleOutPolicy") } @@ -319,6 +348,56 @@ extension KafkaConnect { } } + public struct ConnectorOperationStep: AWSDecodableShape { + /// The step state of the operation. + public let stepState: ConnectorOperationStepState? + /// The step type of the operation. + public let stepType: ConnectorOperationStepType? + + @inlinable + public init(stepState: ConnectorOperationStepState? = nil, stepType: ConnectorOperationStepType? = nil) { + self.stepState = stepState + self.stepType = stepType + } + + private enum CodingKeys: String, CodingKey { + case stepState = "stepState" + case stepType = "stepType" + } + } + + public struct ConnectorOperationSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the connector operation. + public let connectorOperationArn: String? + /// The state of the connector operation. + public let connectorOperationState: ConnectorOperationState? + /// The type of connector operation performed. + public let connectorOperationType: ConnectorOperationType? + /// The time when operation was created. + @OptionalCustomCoding + public var creationTime: Date? + /// The time when operation ended. + @OptionalCustomCoding + public var endTime: Date? + + @inlinable + public init(connectorOperationArn: String? = nil, connectorOperationState: ConnectorOperationState? = nil, connectorOperationType: ConnectorOperationType? = nil, creationTime: Date? = nil, endTime: Date? = nil) { + self.connectorOperationArn = connectorOperationArn + self.connectorOperationState = connectorOperationState + self.connectorOperationType = connectorOperationType + self.creationTime = creationTime + self.endTime = endTime + } + + private enum CodingKeys: String, CodingKey { + case connectorOperationArn = "connectorOperationArn" + case connectorOperationState = "connectorOperationState" + case connectorOperationType = "connectorOperationType" + case creationTime = "creationTime" + case endTime = "endTime" + } + } + public struct ConnectorSummary: AWSDecodableShape { /// The connector's compute capacity settings. public let capacity: CapacityDescription? 
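The enums above back the new `DescribeConnectorOperation` and `ListConnectorOperations` APIs added earlier in this file. A minimal usage sketch, assuming an existing `AWSClient` named `client` and a real connector ARN (the ARN below is a placeholder):

```swift
import SotoKafkaConnect

// Sketch: page through a connector's operations, then fetch full detail
// for each one. `client` is assumed to be an already-configured AWSClient.
let kafkaConnect = KafkaConnect(client: client, region: .useast1)
let connectorArn = "arn:aws:kafkaconnect:us-east-1:111122223333:connector/example" // placeholder

for try await page in kafkaConnect.listConnectorOperationsPaginator(connectorArn: connectorArn, maxResults: 20) {
    for summary in page.connectorOperations ?? [] {
        guard let operationArn = summary.connectorOperationArn else { continue }
        let detail = try await kafkaConnect.describeConnectorOperation(connectorOperationArn: operationArn)
        print(operationArn,
              detail.connectorOperationType?.rawValue ?? "?",
              detail.connectorOperationState?.rawValue ?? "?")
    }
}
```

Because `UpdateConnector` now also returns a `connectorOperationArn` (see the `UpdateConnectorResponse` hunk below), the same `describeConnectorOperation` call can poll a configuration update until it reaches `UPDATE_COMPLETE` or one of the rollback states.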
@@ -897,6 +976,83 @@ extension KafkaConnect { } } + public struct DescribeConnectorOperationRequest: AWSEncodableShape { + /// ARN of the connector operation to be described. + public let connectorOperationArn: String + + @inlinable + public init(connectorOperationArn: String) { + self.connectorOperationArn = connectorOperationArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.connectorOperationArn, key: "connectorOperationArn") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DescribeConnectorOperationResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the connector. + public let connectorArn: String? + /// The Amazon Resource Name (ARN) of the connector operation. + public let connectorOperationArn: String? + /// The state of the connector operation. + public let connectorOperationState: ConnectorOperationState? + /// The type of connector operation performed. + public let connectorOperationType: ConnectorOperationType? + /// The time when the operation was created. + @OptionalCustomCoding + public var creationTime: Date? + /// The time when the operation ended. + @OptionalCustomCoding + public var endTime: Date? + public let errorInfo: StateDescription? + /// The array of operation steps taken. + public let operationSteps: [ConnectorOperationStep]? + /// The origin connector configuration. + public let originConnectorConfiguration: [String: String]? + /// The origin worker setting. + public let originWorkerSetting: WorkerSetting? + /// The target connector configuration. + public let targetConnectorConfiguration: [String: String]? + /// The target worker setting. + public let targetWorkerSetting: WorkerSetting? + + @inlinable + public init(connectorArn: String? = nil, connectorOperationArn: String? = nil, connectorOperationState: ConnectorOperationState? = nil, connectorOperationType: ConnectorOperationType? = nil, creationTime: Date? = nil, endTime: Date? = nil, errorInfo: StateDescription? = nil, operationSteps: [ConnectorOperationStep]? = nil, originConnectorConfiguration: [String: String]? = nil, originWorkerSetting: WorkerSetting? = nil, targetConnectorConfiguration: [String: String]? = nil, targetWorkerSetting: WorkerSetting? 
= nil) { + self.connectorArn = connectorArn + self.connectorOperationArn = connectorOperationArn + self.connectorOperationState = connectorOperationState + self.connectorOperationType = connectorOperationType + self.creationTime = creationTime + self.endTime = endTime + self.errorInfo = errorInfo + self.operationSteps = operationSteps + self.originConnectorConfiguration = originConnectorConfiguration + self.originWorkerSetting = originWorkerSetting + self.targetConnectorConfiguration = targetConnectorConfiguration + self.targetWorkerSetting = targetWorkerSetting + } + + private enum CodingKeys: String, CodingKey { + case connectorArn = "connectorArn" + case connectorOperationArn = "connectorOperationArn" + case connectorOperationState = "connectorOperationState" + case connectorOperationType = "connectorOperationType" + case creationTime = "creationTime" + case endTime = "endTime" + case errorInfo = "errorInfo" + case operationSteps = "operationSteps" + case originConnectorConfiguration = "originConnectorConfiguration" + case originWorkerSetting = "originWorkerSetting" + case targetConnectorConfiguration = "targetConnectorConfiguration" + case targetWorkerSetting = "targetWorkerSetting" + } + } + public struct DescribeConnectorRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the connector that you want to describe. public let connectorArn: String @@ -1224,6 +1380,55 @@ extension KafkaConnect { } } + public struct ListConnectorOperationsRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the connector for which to list operations. + public let connectorArn: String + /// Maximum number of connector operations to fetch in one get request. + public let maxResults: Int? + /// If the response is truncated, it includes a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off. + public let nextToken: String? + + @inlinable + public init(connectorArn: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.connectorArn = connectorArn + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.connectorArn, key: "connectorArn") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListConnectorOperationsResponse: AWSDecodableShape { + /// An array of connector operation descriptions. + public let connectorOperations: [ConnectorOperationSummary]? + /// If the response is truncated, it includes a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off. + public let nextToken: String? + + @inlinable + public init(connectorOperations: [ConnectorOperationSummary]? = nil, nextToken: String? 
= nil) { + self.connectorOperations = connectorOperations + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case connectorOperations = "connectorOperations" + case nextToken = "nextToken" + } + } + public struct ListConnectorsRequest: AWSEncodableShape { /// The name prefix that you want to use to search for and list connectors. public let connectorNamePrefix: String? @@ -1478,8 +1683,6 @@ extension KafkaConnect { public func validate(name: String) throws { try self.validate(self.mcuCount, name: "mcuCount", parent: name, max: 8) try self.validate(self.mcuCount, name: "mcuCount", parent: name, min: 1) - try self.validate(self.workerCount, name: "workerCount", parent: name, max: 10) - try self.validate(self.workerCount, name: "workerCount", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { @@ -1521,8 +1724,6 @@ extension KafkaConnect { public func validate(name: String) throws { try self.validate(self.mcuCount, name: "mcuCount", parent: name, max: 8) try self.validate(self.mcuCount, name: "mcuCount", parent: name, min: 1) - try self.validate(self.workerCount, name: "workerCount", parent: name, max: 10) - try self.validate(self.workerCount, name: "workerCount", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { @@ -1814,50 +2015,59 @@ extension KafkaConnect { public struct UpdateConnectorRequest: AWSEncodableShape { /// The target capacity. - public let capacity: CapacityUpdate + public let capacity: CapacityUpdate? /// The Amazon Resource Name (ARN) of the connector that you want to update. public let connectorArn: String + /// A map of keys to values that represent the configuration for the connector. + public let connectorConfiguration: [String: String]? /// The current version of the connector that you want to update. public let currentVersion: String @inlinable - public init(capacity: CapacityUpdate, connectorArn: String, currentVersion: String) { + public init(capacity: CapacityUpdate? = nil, connectorArn: String, connectorConfiguration: [String: String]? = nil, currentVersion: String) { self.capacity = capacity self.connectorArn = connectorArn + self.connectorConfiguration = connectorConfiguration self.currentVersion = currentVersion } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) - try container.encode(self.capacity, forKey: .capacity) + try container.encodeIfPresent(self.capacity, forKey: .capacity) request.encodePath(self.connectorArn, key: "connectorArn") + try container.encodeIfPresent(self.connectorConfiguration, forKey: .connectorConfiguration) request.encodeQuery(self.currentVersion, key: "currentVersion") } public func validate(name: String) throws { - try self.capacity.validate(name: "\(name).capacity") + try self.capacity?.validate(name: "\(name).capacity") } private enum CodingKeys: String, CodingKey { case capacity = "capacity" + case connectorConfiguration = "connectorConfiguration" } } public struct UpdateConnectorResponse: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the connector. public let connectorArn: String? + /// The Amazon Resource Name (ARN) of the connector operation. + public let connectorOperationArn: String? /// The state of the connector. public let connectorState: ConnectorState? @inlinable - public init(connectorArn: String? = nil, connectorState: ConnectorState? = nil) { + public init(connectorArn: String? 
= nil, connectorOperationArn: String? = nil, connectorState: ConnectorState? = nil) { self.connectorArn = connectorArn + self.connectorOperationArn = connectorOperationArn self.connectorState = connectorState } private enum CodingKeys: String, CodingKey { case connectorArn = "connectorArn" + case connectorOperationArn = "connectorOperationArn" case connectorState = "connectorState" } } @@ -2067,6 +2277,19 @@ extension KafkaConnect { case s3 = "s3" } } + + public struct WorkerSetting: AWSDecodableShape { + public let capacity: CapacityDescription? + + @inlinable + public init(capacity: CapacityDescription? = nil) { + self.capacity = capacity + } + + private enum CodingKeys: String, CodingKey { + case capacity = "capacity" + } + } } // MARK: - Errors diff --git a/Sources/Soto/Services/KendraRanking/KendraRanking_api.swift b/Sources/Soto/Services/KendraRanking/KendraRanking_api.swift index 0290e8faf1..d241364728 100644 --- a/Sources/Soto/Services/KendraRanking/KendraRanking_api.swift +++ b/Sources/Soto/Services/KendraRanking/KendraRanking_api.swift @@ -91,6 +91,7 @@ public struct KendraRanking: AWSService { "ap-southeast-3": "kendra-ranking.ap-southeast-3.api.aws", "ap-southeast-4": "kendra-ranking.ap-southeast-4.api.aws", "ap-southeast-5": "kendra-ranking.ap-southeast-5.api.aws", + "ap-southeast-7": "kendra-ranking.ap-southeast-7.api.aws", "ca-central-1": "kendra-ranking.ca-central-1.api.aws", "ca-west-1": "kendra-ranking.ca-west-1.api.aws", "cn-north-1": "kendra-ranking.cn-north-1.api.amazonwebservices.com.cn", @@ -104,6 +105,7 @@ public struct KendraRanking: AWSService { "il-central-1": "kendra-ranking.il-central-1.api.aws", "me-central-1": "kendra-ranking.me-central-1.api.aws", "me-south-1": "kendra-ranking.me-south-1.api.aws", + "mx-central-1": "kendra-ranking.mx-central-1.api.aws", "sa-east-1": "kendra-ranking.sa-east-1.api.aws", "us-east-1": "kendra-ranking.us-east-1.api.aws", "us-east-2": "kendra-ranking.us-east-2.api.aws", @@ -129,6 +131,7 @@ public struct KendraRanking: AWSService { "ap-southeast-3": "kendra-ranking-fips.ap-southeast-3.api.aws", "ap-southeast-4": "kendra-ranking-fips.ap-southeast-4.api.aws", "ap-southeast-5": "kendra-ranking-fips.ap-southeast-5.api.aws", + "ap-southeast-7": "kendra-ranking-fips.ap-southeast-7.api.aws", "ca-central-1": "kendra-ranking-fips.ca-central-1.api.aws", "ca-west-1": "kendra-ranking-fips.ca-west-1.api.aws", "cn-north-1": "kendra-ranking-fips.cn-north-1.api.amazonwebservices.com.cn", @@ -142,6 +145,7 @@ public struct KendraRanking: AWSService { "il-central-1": "kendra-ranking-fips.il-central-1.api.aws", "me-central-1": "kendra-ranking-fips.me-central-1.api.aws", "me-south-1": "kendra-ranking-fips.me-south-1.api.aws", + "mx-central-1": "kendra-ranking-fips.mx-central-1.api.aws", "sa-east-1": "kendra-ranking-fips.sa-east-1.api.aws", "us-east-1": "kendra-ranking-fips.us-east-1.api.aws", "us-east-2": "kendra-ranking-fips.us-east-2.api.aws", diff --git a/Sources/Soto/Services/Lambda/Lambda_api.swift b/Sources/Soto/Services/Lambda/Lambda_api.swift index 0cf6ab1263..dc02d57277 100644 --- a/Sources/Soto/Services/Lambda/Lambda_api.swift +++ b/Sources/Soto/Services/Lambda/Lambda_api.swift @@ -92,6 +92,7 @@ public struct Lambda: AWSService { "ap-southeast-3": "lambda.ap-southeast-3.api.aws", "ap-southeast-4": "lambda.ap-southeast-4.api.aws", "ap-southeast-5": "lambda.ap-southeast-5.api.aws", + "ap-southeast-7": "lambda.ap-southeast-7.api.aws", "ca-central-1": "lambda.ca-central-1.api.aws", "ca-west-1": "lambda.ca-west-1.api.aws", "cn-north-1": 
"lambda.cn-north-1.api.amazonwebservices.com.cn", @@ -107,6 +108,7 @@ public struct Lambda: AWSService { "il-central-1": "lambda.il-central-1.api.aws", "me-central-1": "lambda.me-central-1.api.aws", "me-south-1": "lambda.me-south-1.api.aws", + "mx-central-1": "lambda.mx-central-1.api.aws", "sa-east-1": "lambda.sa-east-1.api.aws", "us-east-1": "lambda.us-east-1.api.aws", "us-east-2": "lambda.us-east-2.api.aws", diff --git a/Sources/Soto/Services/MWAA/MWAA_api.swift b/Sources/Soto/Services/MWAA/MWAA_api.swift index 5bf9007015..5e889f5649 100644 --- a/Sources/Soto/Services/MWAA/MWAA_api.swift +++ b/Sources/Soto/Services/MWAA/MWAA_api.swift @@ -127,7 +127,7 @@ public struct MWAA: AWSService { /// /// Parameters: /// - airflowConfigurationOptions: A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options. - /// - airflowVersion: The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1. + /// - airflowVersion: The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3. /// - dagS3Path: The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs. /// - endpointManagement: Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to SERVICE, Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to CUSTOMER, you must create, and manage, the VPC endpoints for your VPC. If you choose to create an environment in a shared VPC, you must set this value to CUSTOMER. In a shared VPC deployment, the environment will remain in PENDING status until you create the VPC endpoints. If you do not take action to create the endpoints within 72 hours, the status will change to CREATE_FAILED. You can delete the failed environment and create a new one. /// - environmentClass: The environment class type. Valid values: mw1.micro, mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class. @@ -525,7 +525,7 @@ public struct MWAA: AWSService { /// /// Parameters: /// - airflowConfigurationOptions: A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options. - /// - airflowVersion: The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1. + /// - airflowVersion: The Apache Airflow version for your environment. 
To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3. /// - dagS3Path: The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs. /// - environmentClass: The environment class type. Valid values: mw1.micro, mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class. /// - executionRoleArn: The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Amazon MWAA Execution role. diff --git a/Sources/Soto/Services/MWAA/MWAA_shapes.swift b/Sources/Soto/Services/MWAA/MWAA_shapes.swift index 6fa180b104..0dfc1fdb26 100644 --- a/Sources/Soto/Services/MWAA/MWAA_shapes.swift +++ b/Sources/Soto/Services/MWAA/MWAA_shapes.swift @@ -157,7 +157,7 @@ extension MWAA { public struct CreateEnvironmentInput: AWSEncodableShape { /// A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options. public let airflowConfigurationOptions: [String: String]? - /// The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1. + /// The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3. public let airflowVersion: String? /// The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs. public let dagS3Path: String @@ -471,7 +471,7 @@ extension MWAA { public struct Environment: AWSDecodableShape { /// A list of key-value pairs containing the Apache Airflow configuration options attached to your environment. For more information, see Apache Airflow configuration options. public let airflowConfigurationOptions: [String: String]? - /// The Apache Airflow version on your environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1. + /// The Apache Airflow version on your environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3. public let airflowVersion: String? /// The Amazon Resource Name (ARN) of the Amazon MWAA environment. public let arn: String? @@ -1139,7 +1139,7 @@ extension MWAA { public struct UpdateEnvironmentInput: AWSEncodableShape { /// A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options. 
public let airflowConfigurationOptions: [String: String]? - /// The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1. + /// The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3. public let airflowVersion: String? /// The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs. public let dagS3Path: String? diff --git a/Sources/Soto/Services/Macie2/Macie2_api.swift b/Sources/Soto/Services/Macie2/Macie2_api.swift index 95dba9c310..f37524d56e 100644 --- a/Sources/Soto/Services/Macie2/Macie2_api.swift +++ b/Sources/Soto/Services/Macie2/Macie2_api.swift @@ -79,6 +79,36 @@ public struct Macie2: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "macie2.af-south-1.api.aws", + "ap-east-1": "macie2.ap-east-1.api.aws", + "ap-northeast-1": "macie2.ap-northeast-1.api.aws", + "ap-northeast-2": "macie2.ap-northeast-2.api.aws", + "ap-northeast-3": "macie2.ap-northeast-3.api.aws", + "ap-south-1": "macie2.ap-south-1.api.aws", + "ap-southeast-1": "macie2.ap-southeast-1.api.aws", + "ap-southeast-2": "macie2.ap-southeast-2.api.aws", + "ca-central-1": "macie2.ca-central-1.api.aws", + "eu-central-1": "macie2.eu-central-1.api.aws", + "eu-north-1": "macie2.eu-north-1.api.aws", + "eu-south-1": "macie2.eu-south-1.api.aws", + "eu-west-1": "macie2.eu-west-1.api.aws", + "eu-west-2": "macie2.eu-west-2.api.aws", + "eu-west-3": "macie2.eu-west-3.api.aws", + "il-central-1": "macie2.il-central-1.api.aws", + "me-south-1": "macie2.me-south-1.api.aws", + "sa-east-1": "macie2.sa-east-1.api.aws", + "us-east-1": "macie2.us-east-1.api.aws", + "us-east-2": "macie2.us-east-2.api.aws", + "us-west-1": "macie2.us-west-1.api.aws", + "us-west-2": "macie2.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "us-east-1": "macie2-fips.us-east-1.api.aws", + "us-east-2": "macie2-fips.us-east-2.api.aws", + "us-west-1": "macie2-fips.us-west-1.api.aws", + "us-west-2": "macie2-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ "us-east-1": "macie2-fips.us-east-1.amazonaws.com", "us-east-2": "macie2-fips.us-east-2.amazonaws.com", @@ -1732,7 +1762,7 @@ public struct Macie2: AWSService { return try await self.listClassificationScopes(input, logger: logger) } - /// Retrieves a subset of information about all the custom data identifiers for an account. + /// Retrieves a subset of information about the custom data identifiers for an account. 
@Sendable @inlinable public func listCustomDataIdentifiers(_ input: ListCustomDataIdentifiersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCustomDataIdentifiersResponse { @@ -1745,7 +1775,7 @@ public struct Macie2: AWSService { logger: logger ) } - /// Retrieves a subset of information about all the custom data identifiers for an account. + /// Retrieves a subset of information about the custom data identifiers for an account. /// /// Parameters: /// - maxResults: The maximum number of items to include in each page of the response. @@ -2151,7 +2181,7 @@ public struct Macie2: AWSService { return try await self.putFindingsPublicationConfiguration(input, logger: logger) } - /// Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes. + /// Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes for an account. @Sendable @inlinable public func searchResources(_ input: SearchResourcesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchResourcesResponse { @@ -2164,7 +2194,7 @@ public struct Macie2: AWSService { logger: logger ) } - /// Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes. + /// Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes for an account. /// /// Parameters: /// - bucketCriteria: The filter conditions that determine which S3 buckets to include or exclude from the query results. @@ -2617,7 +2647,7 @@ public struct Macie2: AWSService { /// /// Parameters: /// - resourceArn: The Amazon Resource Name (ARN) of the S3 bucket that the request applies to. - /// - suppressDataIdentifiers: An array of objects, one for each custom data identifier or managed data identifier that detected the type of sensitive data to start excluding or including in the bucket's score. To start including all sensitive data types in the score, don't specify any values for this array. + /// - suppressDataIdentifiers: An array of objects, one for each custom data identifier or managed data identifier that detected a type of sensitive data to exclude from the bucket's score. To include all sensitive data types in the score, don't specify any values for this array. /// - logger: Logger use during operation @inlinable public func updateResourceProfileDetections( diff --git a/Sources/Soto/Services/Macie2/Macie2_shapes.swift b/Sources/Soto/Services/Macie2/Macie2_shapes.swift index 9b345c01fa..d8d07deec2 100644 --- a/Sources/Soto/Services/Macie2/Macie2_shapes.swift +++ b/Sources/Soto/Services/Macie2/Macie2_shapes.swift @@ -90,6 +90,7 @@ extension Macie2 { public enum BucketMetadataErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case accessDenied = "ACCESS_DENIED" + case bucketCountExceedsQuota = "BUCKET_COUNT_EXCEEDS_QUOTA" public var description: String { return self.rawValue } } @@ -665,7 +666,7 @@ extension Macie2 { public struct ApiCallDetails: AWSDecodableShape { /// The name of the operation that was invoked most recently and produced the finding. public let api: String? - /// The URL of the Amazon Web Service that provides the operation, for example: s3.amazonaws.com. + /// The URL of the Amazon Web Services service that provides the operation, for example: s3.amazonaws.com. 
public let apiServiceName: String? /// The first date and time, in UTC and extended ISO 8601 format, when any operation was invoked and produced the finding. @OptionalCustomCoding @@ -934,7 +935,7 @@ extension Macie2 { public let publiclyReadable: Int64? /// The total number of buckets that allow the general public to have write access to the bucket. public let publiclyWritable: Int64? - /// The total number of buckets that Amazon Macie wasn't able to evaluate permissions settings for. Macie can't determine whether these buckets are publicly accessible. + /// The total number of buckets that Amazon Macie wasn't able to evaluate permissions settings for. For example, the buckets' policies or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether the buckets are publicly accessible. public let unknown: Int64? @inlinable @@ -960,7 +961,7 @@ extension Macie2 { public let s3Managed: Int64? /// The total number of buckets that don't specify default server-side encryption behavior for new objects. Default encryption settings aren't configured for these buckets. public let unencrypted: Int64? - /// The total number of buckets that Amazon Macie doesn't have current encryption metadata for. Macie can't provide current data about the default encryption settings for these buckets. + /// The total number of buckets that Amazon Macie doesn't have current encryption metadata for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the default encryption settings for the buckets. public let unknown: Int64? @inlinable @@ -986,7 +987,7 @@ extension Macie2 { public let `internal`: Int64? /// The total number of buckets that aren't shared with other Amazon Web Services accounts, Amazon CloudFront OAIs, or CloudFront OACs. public let notShared: Int64? - /// The total number of buckets that Amazon Macie wasn't able to evaluate shared access settings for. Macie can't determine whether these buckets are shared with other Amazon Web Services accounts, Amazon CloudFront OAIs, or CloudFront OACs. + /// The total number of buckets that Amazon Macie wasn't able to evaluate shared access settings for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether the buckets are shared with other Amazon Web Services accounts, Amazon CloudFront OAIs, or CloudFront OACs. public let unknown: Int64? @inlinable @@ -1010,7 +1011,7 @@ extension Macie2 { public let allowsUnencryptedObjectUploads: Int64? /// The total number of buckets whose bucket policies require server-side encryption of new objects. PutObject requests for these buckets must include a valid server-side encryption header: the x-amz-server-side-encryption header with a value of AES256 or aws:kms, or the x-amz-server-side-encryption-customer-algorithm header with a value of AES256. public let deniesUnencryptedObjectUploads: Int64? - /// The total number of buckets that Amazon Macie wasn't able to evaluate server-side encryption requirements for. Macie can't determine whether the bucket policies for these buckets require server-side encryption of new objects. + /// The total number of buckets that Amazon Macie wasn't able to evaluate server-side encryption requirements for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether bucket policies for the buckets require server-side encryption of new objects. public let unknown: Int64? 
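The revised doc comments above all concern the `unknown` counters, which now call out bucket policies and the monitoring quota as reasons Macie couldn't evaluate a bucket. A minimal sketch of reading those counters, assuming an existing `AWSClient` named `client` and that `GetBucketStatisticsResponse` exposes the three grouping shapes shown in this file (assumptions, not confirmed by this diff):

```swift
import SotoMacie2

// Sketch: report how many buckets Macie couldn't evaluate for each
// statistics grouping. `client` is assumed to be a configured AWSClient.
let macie = Macie2(client: client, region: .useast1)
let stats = try await macie.getBucketStatistics(.init())
print("Permissions unknown:", stats.bucketCountByEffectivePermission?.unknown ?? 0)
print("Encryption unknown: ", stats.bucketCountByEncryptionType?.unknown ?? 0)
print("Sharing unknown:    ", stats.bucketCountBySharedAccessType?.unknown ?? 0)
```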
@inlinable @@ -1105,13 +1106,13 @@ extension Macie2 { public let classifiableObjectCount: Int64? /// The total storage size, in bytes, of the objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format. If versioning is enabled for the bucket, Macie calculates this value based on the size of the latest version of each applicable object in the bucket. This value doesn't reflect the storage size of all versions of each applicable object in the bucket. public let classifiableSizeInBytes: Int64? - /// The error code for an error that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. If this value is ACCESS_DENIED, Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. If this value is null, Macie was able to retrieve and process the information. + /// The code for an error or issue that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. Possible values are: ACCESS_DENIED - Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. BUCKET_COUNT_EXCEEDS_QUOTA - Retrieving and processing the information would exceed the quota for the number of buckets that Macie monitors for an account (10,000). If this value is null, Macie was able to retrieve and process the information. public let errorCode: BucketMetadataErrorCode? - /// A brief description of the error (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information. + /// A brief description of the error or issue (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information. public let errorMessage: String? /// Specifies whether any one-time or recurring classification jobs are configured to analyze objects in the bucket, and, if so, the details of the job that ran most recently. public let jobDetails: JobDetails? - /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account. + /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if this analysis hasn't occurred. @OptionalCustomCoding public var lastAutomatedDiscoveryTime: Date? /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently retrieved bucket or object metadata from Amazon S3 for the bucket. @@ -1127,7 +1128,7 @@ extension Macie2 { public let region: String? /// Specifies whether the bucket is configured to replicate one or more objects to buckets for other Amazon Web Services accounts and, if so, which accounts. public let replicationDetails: ReplicationDetails? 
- /// The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses. + /// The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).If automated sensitive data discovery has never been enabled for your account or it's been disabled for your organization or standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it's been excluded from recent analyses. public let sensitivityScore: Int? /// The default server-side encryption settings for the bucket. public let serverSideEncryption: BucketServerSideEncryption? @@ -2326,7 +2327,7 @@ extension Macie2 { public let id: String? /// The name of the custom data identifier or managed data identifier that detected the sensitive data. For a managed data identifier, this value is the same as the unique identifier (id). public let name: String? - /// Specifies whether occurrences of this type of sensitive data are excluded (true) or included (false) in the bucket's sensitivity score. + /// Specifies whether occurrences of this type of sensitive data are excluded (true) or included (false) in the bucket's sensitivity score, if the score is calculated by Amazon Macie. public let suppressed: Bool? /// The type of data identifier that detected the sensitive data. Possible values are: CUSTOM, for a custom data identifier; and, MANAGED, for a managed data identifier. public let type: DataIdentifierType? @@ -2851,7 +2852,7 @@ extension Macie2 { public let bucketCountByObjectEncryptionRequirement: BucketCountPolicyAllowsUnencryptedObjectUploads? /// The total number of buckets that are or aren't shared with other Amazon Web Services accounts, Amazon CloudFront origin access identities (OAIs), or CloudFront origin access controls (OACs). public let bucketCountBySharedAccessType: BucketCountBySharedAccessType? - /// The aggregated sensitive data discovery statistics for the buckets. If automated sensitive data discovery is currently disabled for your account, the value for each statistic is 0. + /// The aggregated sensitive data discovery statistics for the buckets. If automated sensitive data discovery is currently disabled for your account, the value for most statistics is 0. public let bucketStatisticsBySensitivity: BucketStatisticsBySensitivity? /// The total number of objects that Amazon Macie can analyze in the buckets. These objects use a supported storage class and have a file name extension for a supported file or storage format. public let classifiableObjectCount: Int64? @@ -3418,7 +3419,7 @@ extension Macie2 { public struct GetSensitiveDataOccurrencesAvailabilityResponse: AWSDecodableShape { /// Specifies whether occurrences of sensitive data can be retrieved for the finding. Possible values are: AVAILABLE, the sensitive data can be retrieved; and, UNAVAILABLE, the sensitive data can't be retrieved. If this value is UNAVAILABLE, the reasons array indicates why the data can't be retrieved. public let code: AvailabilityCode? - /// Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are: ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. 
Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie. INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve. INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve. MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data. MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account. OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file. OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with an KMS key that's currently disabled. RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve. ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data. UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding. UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data. This value is null if sensitive data can be retrieved for the finding. + /// Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are: ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie. INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve. INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. 
Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve. MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data. MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account. OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file. OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with an KMS key that isn’t available. For example, the key is disabled, is scheduled for deletion, or was deleted. RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve. ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data. UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding. UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data. This value is null if sensitive data can be retrieved for the finding. public let reasons: [UnavailabilityReasonCode]? @inlinable @@ -4673,20 +4674,20 @@ extension Macie2 { public let classifiableObjectCount: Int64? /// The total storage size, in bytes, of the objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format. If versioning is enabled for the bucket, Macie calculates this value based on the size of the latest version of each applicable object in the bucket. This value doesn't reflect the storage size of all versions of each applicable object in the bucket. public let classifiableSizeInBytes: Int64? - /// The error code for an error that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. If this value is ACCESS_DENIED, Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. If this value is null, Macie was able to retrieve and process the information. + /// The code for an error or issue that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. Possible values are: ACCESS_DENIED - Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. 
BUCKET_COUNT_EXCEEDS_QUOTA - Retrieving and processing the information would exceed the quota for the number of buckets that Macie monitors for an account (10,000). If this value is null, Macie was able to retrieve and process the information. public let errorCode: BucketMetadataErrorCode? - /// A brief description of the error (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information. + /// A brief description of the error or issue (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information. public let errorMessage: String? /// Specifies whether any one-time or recurring classification jobs are configured to analyze objects in the bucket, and, if so, the details of the job that ran most recently. public let jobDetails: JobDetails? - /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account. + /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if this analysis hasn't occurred. @OptionalCustomCoding public var lastAutomatedDiscoveryTime: Date? /// The total number of objects in the bucket. public let objectCount: Int64? /// The total number of objects in the bucket, grouped by server-side encryption type. This includes a grouping that reports the total number of objects that aren't encrypted or use client-side encryption. public let objectCountByEncryptionType: ObjectCountByEncryptionType? - /// The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses. + /// The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive). If automated sensitive data discovery has never been enabled for your account or it's been disabled for your organization or standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it's been excluded from recent analyses. public let sensitivityScore: Int? /// The total storage size, in bytes, of the bucket. If versioning is enabled for the bucket, Amazon Macie calculates this value based on the size of the latest version of each object in the bucket. This value doesn't reflect the storage size of all versions of each object in the bucket. public let sizeInBytes: Int64? @@ -4738,7 +4739,7 @@ extension Macie2 { } public struct MatchingResource: AWSDecodableShape { - /// The details of an S3 bucket that Amazon Macie monitors and analyzes. + /// The details of an S3 bucket that Amazon Macie monitors and analyzes for your account. public let matchingBucket: MatchingBucket?
@inlinable @@ -5304,7 +5305,7 @@ extension Macie2 { } public struct S3ClassificationScopeExclusionUpdate: AWSEncodableShape { - /// Depending on the value specified for the update operation (ClassificationScopeUpdateOperation), an array of strings that: lists the names of buckets to add or remove from the list, or specifies a new set of bucket names that overwrites all existing names in the list. Each string must be the full name of an S3 bucket. Values are case sensitive. + /// Depending on the value specified for the update operation (ClassificationScopeUpdateOperation), an array of strings that: lists the names of buckets to add or remove from the list, or specifies a new set of bucket names that overwrites all existing names in the list. Each string must be the full name of an existing S3 bucket. Values are case sensitive. public let bucketNames: [String]? /// Specifies how to apply the changes to the exclusion list. Valid values are: ADD - Append the specified bucket names to the current list. REMOVE - Remove the specified bucket names from the current list. REPLACE - Overwrite the current list with the specified list of bucket names. If you specify this value, Amazon Macie removes all existing names from the list and adds all the specified names to the list. public let operation: ClassificationScopeUpdateOperation? @@ -6007,7 +6008,7 @@ extension Macie2 { } public struct SuppressDataIdentifier: AWSEncodableShape { - /// The unique identifier for the custom data identifier or managed data identifier that detected the type of sensitive data to exclude or include in the score. + /// The unique identifier for the custom data identifier or managed data identifier that detected the type of sensitive data to exclude from the score. public let id: String? /// The type of data identifier that detected the sensitive data. Possible values are: CUSTOM, for a custom data identifier; and, MANAGED, for a managed data identifier. public let type: DataIdentifierType? @@ -6506,7 +6507,7 @@ extension Macie2 { public struct UpdateResourceProfileDetectionsRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the S3 bucket that the request applies to. public let resourceArn: String? - /// An array of objects, one for each custom data identifier or managed data identifier that detected the type of sensitive data to start excluding or including in the bucket's score. To start including all sensitive data types in the score, don't specify any values for this array. + /// An array of objects, one for each custom data identifier or managed data identifier that detected a type of sensitive data to exclude from the bucket's score. To include all sensitive data types in the score, don't specify any values for this array. public let suppressDataIdentifiers: [SuppressDataIdentifier]? @inlinable @@ -6783,7 +6784,7 @@ extension Macie2 { public let assumedRole: AssumedRole? /// If the action was performed using the credentials for another Amazon Web Services account, the details of that account. public let awsAccount: AwsAccount? - /// If the action was performed by an Amazon Web Services account that belongs to an Amazon Web Service, the name of the service. + /// If the action was performed by an Amazon Web Services account that belongs to an Amazon Web Services service, the name of the service. public let awsService: AwsService? 
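A minimal usage sketch for the suppression shapes above, assuming the generated Macie2 client exposes the updateResourceProfileDetections(resourceArn:suppressDataIdentifiers:) convenience method that Soto produces for this operation; the region, bucket ARN, and identifier ID are placeholder assumptions:

import SotoMacie2

// Exclude one managed data identifier from a bucket's sensitivity score.
// The ARN and identifier ID are hypothetical placeholders.
let client = AWSClient()
let macie = Macie2(client: client, region: .useast1)
_ = try await macie.updateResourceProfileDetections(
    resourceArn: "arn:aws:s3:::amzn-s3-demo-bucket",
    suppressDataIdentifiers: [
        .init(id: "CREDIT_CARD_NUMBER", type: .managed)
    ]
)
// Per the UpdateResourceProfileDetectionsRequest doc above, sending no
// values for the array includes all sensitive data types in the score.
try await client.shutdown()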
/// If the action was performed with temporary security credentials that were obtained using the GetFederationToken operation of the Security Token Service (STS) API, the identifiers, session context, and other details about the identity. public let federatedUser: FederatedUser? diff --git a/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift b/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift index 4b36715377..0d9fae1eed 100644 --- a/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift +++ b/Sources/Soto/Services/MediaConnect/MediaConnect_shapes.swift @@ -72,6 +72,12 @@ extension MediaConnect { public var description: String { return self.rawValue } } + public enum ContentQualityAnalysisState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum DesiredState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "ACTIVE" case deleted = "DELETED" @@ -790,6 +796,38 @@ extension MediaConnect { } } + public struct AudioMonitoringSetting: AWSEncodableShape & AWSDecodableShape { + /// Detects periods of silence. + public let silentAudio: SilentAudio? + + @inlinable + public init(silentAudio: SilentAudio? = nil) { + self.silentAudio = silentAudio + } + + private enum CodingKeys: String, CodingKey { + case silentAudio = "silentAudio" + } + } + + public struct BlackFrames: AWSEncodableShape & AWSDecodableShape { + /// Indicates whether the BlackFrames metric is enabled or disabled. + public let state: State? + /// Specifies the number of consecutive seconds of black frames that triggers an event or alert. + public let thresholdSeconds: Int? + + @inlinable + public init(state: State? = nil, thresholdSeconds: Int? = nil) { + self.state = state + self.thresholdSeconds = thresholdSeconds + } + + private enum CodingKeys: String, CodingKey { + case state = "state" + case thresholdSeconds = "thresholdSeconds" + } + } + public struct Bridge: AWSDecodableShape { /// The Amazon Resource Number (ARN) of the bridge. public let bridgeArn: String? @@ -1904,6 +1942,24 @@ extension MediaConnect { } } + public struct FrozenFrames: AWSEncodableShape & AWSDecodableShape { + /// Indicates whether the FrozenFrames metric is enabled or disabled. + public let state: State? + /// Specifies the number of consecutive seconds of a static image that triggers an event or alert. + public let thresholdSeconds: Int? + + @inlinable + public init(state: State? = nil, thresholdSeconds: Int? = nil) { + self.state = state + self.thresholdSeconds = thresholdSeconds + } + + private enum CodingKeys: String, CodingKey { + case state = "state" + case thresholdSeconds = "thresholdSeconds" + } + } + public struct Gateway: AWSDecodableShape { /// The range of IP addresses that contribute content or initiate output requests for flows communicating with this gateway. These IP addresses should be in the form of a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16. public let egressCidrBlocks: [String]? @@ -2907,16 +2963,28 @@ extension MediaConnect { } public struct MonitoringConfig: AWSEncodableShape & AWSDecodableShape { + /// Contains the settings for audio stream metrics monitoring. + public let audioMonitoringSettings: [AudioMonitoringSetting]? + /// Indicates whether content quality analysis is enabled or disabled. + public let contentQualityAnalysisState: ContentQualityAnalysisState? 
/// The state of thumbnail monitoring. public let thumbnailState: ThumbnailState? + /// Contains the settings for video stream metrics monitoring. + public let videoMonitoringSettings: [VideoMonitoringSetting]? @inlinable - public init(thumbnailState: ThumbnailState? = nil) { + public init(audioMonitoringSettings: [AudioMonitoringSetting]? = nil, contentQualityAnalysisState: ContentQualityAnalysisState? = nil, thumbnailState: ThumbnailState? = nil, videoMonitoringSettings: [VideoMonitoringSetting]? = nil) { + self.audioMonitoringSettings = audioMonitoringSettings + self.contentQualityAnalysisState = contentQualityAnalysisState self.thumbnailState = thumbnailState + self.videoMonitoringSettings = videoMonitoringSettings } private enum CodingKeys: String, CodingKey { + case audioMonitoringSettings = "audioMonitoringSettings" + case contentQualityAnalysisState = "contentQualityAnalysisState" case thumbnailState = "thumbnailState" + case videoMonitoringSettings = "videoMonitoringSettings" } } @@ -3556,6 +3624,24 @@ extension MediaConnect { } } + public struct SilentAudio: AWSEncodableShape & AWSDecodableShape { + /// Indicates whether the SilentAudio metric is enabled or disabled. + public let state: State? + /// Specifies the number of consecutive seconds of silence that triggers an event or alert. + public let thresholdSeconds: Int? + + @inlinable + public init(state: State? = nil, thresholdSeconds: Int? = nil) { + self.state = state + self.thresholdSeconds = thresholdSeconds + } + + private enum CodingKeys: String, CodingKey { + case state = "state" + case thresholdSeconds = "thresholdSeconds" + } + } + public struct Source: AWSDecodableShape { /// Percentage from 0-100 of the data transfer cost to be billed to the subscriber. public let dataTransferSubscriberFeePercent: Int? @@ -4799,6 +4885,24 @@ extension MediaConnect { } } + public struct VideoMonitoringSetting: AWSEncodableShape & AWSDecodableShape { + /// Detects video frames that are black. + public let blackFrames: BlackFrames? + /// Detects video frames that have not changed. + public let frozenFrames: FrozenFrames? + + @inlinable + public init(blackFrames: BlackFrames? = nil, frozenFrames: FrozenFrames? = nil) { + self.blackFrames = blackFrames + self.frozenFrames = frozenFrames + } + + private enum CodingKeys: String, CodingKey { + case blackFrames = "blackFrames" + case frozenFrames = "frozenFrames" + } + } + public struct VpcInterface: AWSDecodableShape { /// Immutable and has to be unique against other VpcInterfaces in this Flow. public let name: String?
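Composed end to end, the new MediaConnect monitoring shapes nest as shown below; the initializer labels come straight from the hunks above, while the threshold values, the .enabled cases of State and ThumbnailState, and the flow request that would carry the result are assumptions:

import SotoMediaConnect

// Compose the new audio/video monitoring shapes into a MonitoringConfig.
// Thresholds are illustrative; the result would be passed to a flow
// create or update request (not shown here).
let monitoring = MediaConnect.MonitoringConfig(
    audioMonitoringSettings: [
        .init(silentAudio: .init(state: .enabled, thresholdSeconds: 30))
    ],
    contentQualityAnalysisState: .enabled,
    thumbnailState: .enabled,
    videoMonitoringSettings: [
        .init(
            blackFrames: .init(state: .enabled, thresholdSeconds: 10),
            frozenFrames: .init(state: .enabled, thresholdSeconds: 10)
        )
    ]
)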
diff --git a/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift b/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift index 16e841b6a5..261775dee7 100644 --- a/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift +++ b/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift @@ -65,7 +65,6 @@ public struct MediaConvert: AWSService { serviceProtocol: .restjson, apiVersion: "2017-08-29", endpoint: endpoint, - serviceEndpoints: Self.serviceEndpoints, variantEndpoints: Self.variantEndpoints, errorType: MediaConvertErrorType.self, middleware: middleware, @@ -76,14 +75,42 @@ public struct MediaConvert: AWSService { } - /// custom endpoints for regions - static var serviceEndpoints: [String: String] {[ - "cn-northwest-1": "mediaconvert.cn-northwest-1.amazonaws.com.cn" - ]} /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "mediaconvert.af-south-1.api.aws", + "ap-northeast-1": "mediaconvert.ap-northeast-1.api.aws", + "ap-northeast-2": "mediaconvert.ap-northeast-2.api.aws", + "ap-northeast-3": "mediaconvert.ap-northeast-3.api.aws", + "ap-south-1": "mediaconvert.ap-south-1.api.aws", + "ap-southeast-1": "mediaconvert.ap-southeast-1.api.aws", + "ap-southeast-2": "mediaconvert.ap-southeast-2.api.aws", + "ap-southeast-4": "mediaconvert.ap-southeast-4.api.aws", + "ca-central-1": "mediaconvert.ca-central-1.api.aws", + "cn-northwest-1": "mediaconvert.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "mediaconvert.eu-central-1.api.aws", + "eu-north-1": "mediaconvert.eu-north-1.api.aws", + "eu-west-1": "mediaconvert.eu-west-1.api.aws", + "eu-west-2": "mediaconvert.eu-west-2.api.aws", + "eu-west-3": "mediaconvert.eu-west-3.api.aws", + "me-central-1": "mediaconvert.me-central-1.api.aws", + "sa-east-1": "mediaconvert.sa-east-1.api.aws", + "us-east-1": "mediaconvert.us-east-1.api.aws", + "us-east-2": "mediaconvert.us-east-2.api.aws", + "us-gov-west-1": "mediaconvert.us-gov-west-1.api.aws", + "us-west-1": "mediaconvert.us-west-1.api.aws", + "us-west-2": "mediaconvert.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "mediaconvert-fips.ca-central-1.api.aws", + "us-east-1": "mediaconvert-fips.us-east-1.api.aws", + "us-east-2": "mediaconvert-fips.us-east-2.api.aws", + "us-gov-west-1": "mediaconvert.us-gov-west-1.api.aws", + "us-west-1": "mediaconvert-fips.us-west-1.api.aws", + "us-west-2": "mediaconvert-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ "ca-central-1": "mediaconvert-fips.ca-central-1.amazonaws.com", "us-east-1": "mediaconvert-fips.us-east-1.amazonaws.com", diff --git a/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift b/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift index f8947f80e8..12a716b498 100644 --- a/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift +++ b/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift @@ -1435,6 +1435,12 @@ extension MediaConvert { public var description: String { return self.rawValue } } + public enum H264WriteMp4PackagingType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case avc1 = "AVC1" + case avc3 = "AVC3" + public var description: String { return self.rawValue } + } + public enum H265AdaptiveQuantization: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case auto = "AUTO" case high = "HIGH" @@ -2689,6 +2695,12 @@ extension MediaConvert { public var description: 
String { return self.rawValue } } + public enum RemoveRubyReserveAttributes: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum RenewalType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case autoRenew = "AUTO_RENEW" case expire = "EXPIRE" @@ -2832,6 +2844,12 @@ extension MediaConvert { public var description: String { return self.rawValue } } + public enum TimecodeTrack: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum TimedMetadata: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case none = "NONE" case passthrough = "PASSTHROUGH" @@ -3595,6 +3613,7 @@ extension MediaConvert { public func validate(name: String) throws { try self.audioNormalizationSettings?.validate(name: "\(name).audioNormalizationSettings") + try self.validate(self.audioSourceName, name: "audioSourceName", parent: name, max: 2048) try self.validate(self.audioType, name: "audioType", parent: name, max: 255) try self.validate(self.audioType, name: "audioType", parent: name, min: 0) try self.codecSettings?.validate(name: "\(name).codecSettings") @@ -4104,6 +4123,8 @@ extension MediaConvert { public let outlineColor: BurninSubtitleOutlineColor? /// Specify the Outline size of the caption text, in pixels. Leave Outline size blank and set Style passthrough to enabled to use the outline size data from your input captions, if present. public let outlineSize: Int? + /// Optionally remove any tts:rubyReserve attributes present in your input, that do not have a tts:ruby attribute in the same element, from your output. Use if your vertical Japanese output captions have alignment issues. To remove ruby reserve attributes when present: Choose Enabled. To not remove any ruby reserve attributes: Keep the default value, Disabled. + public let removeRubyReserveAttributes: RemoveRubyReserveAttributes? /// Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present. public let shadowColor: BurninSubtitleShadowColor? /// Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to Enabled, leave Shadow opacity blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions. @@ -4122,7 +4143,7 @@ extension MediaConvert { public let yPosition: Int? @inlinable - public init(alignment: BurninSubtitleAlignment? = nil, applyFontColor: BurninSubtitleApplyFontColor? = nil, backgroundColor: BurninSubtitleBackgroundColor? = nil, backgroundOpacity: Int? = nil, fallbackFont: BurninSubtitleFallbackFont? = nil, fontColor: BurninSubtitleFontColor? = nil, fontFileBold: String? = nil, fontFileBoldItalic: String? = nil, fontFileItalic: String? = nil, fontFileRegular: String? = nil, fontOpacity: Int? = nil, fontResolution: Int? = nil, fontScript: FontScript? = nil, fontSize: Int? = nil, hexFontColor: String? = nil, outlineColor: BurninSubtitleOutlineColor? = nil, outlineSize: Int? = nil, shadowColor: BurninSubtitleShadowColor? 
= nil, shadowOpacity: Int? = nil, shadowXOffset: Int? = nil, shadowYOffset: Int? = nil, stylePassthrough: BurnInSubtitleStylePassthrough? = nil, teletextSpacing: BurninSubtitleTeletextSpacing? = nil, xPosition: Int? = nil, yPosition: Int? = nil) { + public init(alignment: BurninSubtitleAlignment? = nil, applyFontColor: BurninSubtitleApplyFontColor? = nil, backgroundColor: BurninSubtitleBackgroundColor? = nil, backgroundOpacity: Int? = nil, fallbackFont: BurninSubtitleFallbackFont? = nil, fontColor: BurninSubtitleFontColor? = nil, fontFileBold: String? = nil, fontFileBoldItalic: String? = nil, fontFileItalic: String? = nil, fontFileRegular: String? = nil, fontOpacity: Int? = nil, fontResolution: Int? = nil, fontScript: FontScript? = nil, fontSize: Int? = nil, hexFontColor: String? = nil, outlineColor: BurninSubtitleOutlineColor? = nil, outlineSize: Int? = nil, removeRubyReserveAttributes: RemoveRubyReserveAttributes? = nil, shadowColor: BurninSubtitleShadowColor? = nil, shadowOpacity: Int? = nil, shadowXOffset: Int? = nil, shadowYOffset: Int? = nil, stylePassthrough: BurnInSubtitleStylePassthrough? = nil, teletextSpacing: BurninSubtitleTeletextSpacing? = nil, xPosition: Int? = nil, yPosition: Int? = nil) { self.alignment = alignment self.applyFontColor = applyFontColor self.backgroundColor = backgroundColor @@ -4140,6 +4161,7 @@ extension MediaConvert { self.hexFontColor = hexFontColor self.outlineColor = outlineColor self.outlineSize = outlineSize + self.removeRubyReserveAttributes = removeRubyReserveAttributes self.shadowColor = shadowColor self.shadowOpacity = shadowOpacity self.shadowXOffset = shadowXOffset @@ -4197,6 +4219,7 @@ extension MediaConvert { case hexFontColor = "hexFontColor" case outlineColor = "outlineColor" case outlineSize = "outlineSize" + case removeRubyReserveAttributes = "removeRubyReserveAttributes" case shadowColor = "shadowColor" case shadowOpacity = "shadowOpacity" case shadowXOffset = "shadowXOffset" @@ -6794,9 +6817,11 @@ extension MediaConvert { public let temporalAdaptiveQuantization: H264TemporalAdaptiveQuantization? /// Inserts timecode for each frame as 4 bytes of an unregistered SEI message. public let unregisteredSeiTimecode: H264UnregisteredSeiTimecode? + /// Specify how SPS and PPS NAL units are written in your output MP4 container, according to ISO/IEC 14496-15. If the location of these parameters doesn't matter in your workflow: Keep the default value, AVC1. MediaConvert writes SPS and PPS NAL units in the sample description ('stsd') box (but not into samples directly). To write SPS and PPS NAL units directly into samples (but not in the 'stsd' box): Choose AVC3. When you do, note that your output might not play properly with some downstream systems or players. + public let writeMp4PackagingType: H264WriteMp4PackagingType? @inlinable - public init(adaptiveQuantization: H264AdaptiveQuantization? = nil, bandwidthReductionFilter: BandwidthReductionFilter? = nil, bitrate: Int? = nil, codecLevel: H264CodecLevel? = nil, codecProfile: H264CodecProfile? = nil, dynamicSubGop: H264DynamicSubGop? = nil, endOfStreamMarkers: H264EndOfStreamMarkers? = nil, entropyEncoding: H264EntropyEncoding? = nil, fieldEncoding: H264FieldEncoding? = nil, flickerAdaptiveQuantization: H264FlickerAdaptiveQuantization? = nil, framerateControl: H264FramerateControl? = nil, framerateConversionAlgorithm: H264FramerateConversionAlgorithm? = nil, framerateDenominator: Int? = nil, framerateNumerator: Int? = nil, gopBReference: H264GopBReference? = nil, gopClosedCadence: Int? 
= nil, gopSize: Double? = nil, gopSizeUnits: H264GopSizeUnits? = nil, hrdBufferFinalFillPercentage: Int? = nil, hrdBufferInitialFillPercentage: Int? = nil, hrdBufferSize: Int? = nil, interlaceMode: H264InterlaceMode? = nil, maxBitrate: Int? = nil, minIInterval: Int? = nil, numberBFramesBetweenReferenceFrames: Int? = nil, numberReferenceFrames: Int? = nil, parControl: H264ParControl? = nil, parDenominator: Int? = nil, parNumerator: Int? = nil, qualityTuningLevel: H264QualityTuningLevel? = nil, qvbrSettings: H264QvbrSettings? = nil, rateControlMode: H264RateControlMode? = nil, repeatPps: H264RepeatPps? = nil, saliencyAwareEncoding: H264SaliencyAwareEncoding? = nil, scanTypeConversionMode: H264ScanTypeConversionMode? = nil, sceneChangeDetect: H264SceneChangeDetect? = nil, slices: Int? = nil, slowPal: H264SlowPal? = nil, softness: Int? = nil, spatialAdaptiveQuantization: H264SpatialAdaptiveQuantization? = nil, syntax: H264Syntax? = nil, telecine: H264Telecine? = nil, temporalAdaptiveQuantization: H264TemporalAdaptiveQuantization? = nil, unregisteredSeiTimecode: H264UnregisteredSeiTimecode? = nil) { + public init(adaptiveQuantization: H264AdaptiveQuantization? = nil, bandwidthReductionFilter: BandwidthReductionFilter? = nil, bitrate: Int? = nil, codecLevel: H264CodecLevel? = nil, codecProfile: H264CodecProfile? = nil, dynamicSubGop: H264DynamicSubGop? = nil, endOfStreamMarkers: H264EndOfStreamMarkers? = nil, entropyEncoding: H264EntropyEncoding? = nil, fieldEncoding: H264FieldEncoding? = nil, flickerAdaptiveQuantization: H264FlickerAdaptiveQuantization? = nil, framerateControl: H264FramerateControl? = nil, framerateConversionAlgorithm: H264FramerateConversionAlgorithm? = nil, framerateDenominator: Int? = nil, framerateNumerator: Int? = nil, gopBReference: H264GopBReference? = nil, gopClosedCadence: Int? = nil, gopSize: Double? = nil, gopSizeUnits: H264GopSizeUnits? = nil, hrdBufferFinalFillPercentage: Int? = nil, hrdBufferInitialFillPercentage: Int? = nil, hrdBufferSize: Int? = nil, interlaceMode: H264InterlaceMode? = nil, maxBitrate: Int? = nil, minIInterval: Int? = nil, numberBFramesBetweenReferenceFrames: Int? = nil, numberReferenceFrames: Int? = nil, parControl: H264ParControl? = nil, parDenominator: Int? = nil, parNumerator: Int? = nil, qualityTuningLevel: H264QualityTuningLevel? = nil, qvbrSettings: H264QvbrSettings? = nil, rateControlMode: H264RateControlMode? = nil, repeatPps: H264RepeatPps? = nil, saliencyAwareEncoding: H264SaliencyAwareEncoding? = nil, scanTypeConversionMode: H264ScanTypeConversionMode? = nil, sceneChangeDetect: H264SceneChangeDetect? = nil, slices: Int? = nil, slowPal: H264SlowPal? = nil, softness: Int? = nil, spatialAdaptiveQuantization: H264SpatialAdaptiveQuantization? = nil, syntax: H264Syntax? = nil, telecine: H264Telecine? = nil, temporalAdaptiveQuantization: H264TemporalAdaptiveQuantization? = nil, unregisteredSeiTimecode: H264UnregisteredSeiTimecode? = nil, writeMp4PackagingType: H264WriteMp4PackagingType? 
= nil) { self.adaptiveQuantization = adaptiveQuantization self.bandwidthReductionFilter = bandwidthReductionFilter self.bitrate = bitrate @@ -6841,6 +6866,7 @@ extension MediaConvert { self.telecine = telecine self.temporalAdaptiveQuantization = temporalAdaptiveQuantization self.unregisteredSeiTimecode = unregisteredSeiTimecode + self.writeMp4PackagingType = writeMp4PackagingType } public func validate(name: String) throws { @@ -6922,6 +6948,7 @@ extension MediaConvert { case telecine = "telecine" case temporalAdaptiveQuantization = "temporalAdaptiveQuantization" case unregisteredSeiTimecode = "unregisteredSeiTimecode" + case writeMp4PackagingType = "writeMp4PackagingType" } } @@ -7842,6 +7869,7 @@ extension MediaConvert { try self.decryptionSettings?.validate(name: "\(name).decryptionSettings") try self.validate(self.dolbyVisionMetadataXml, name: "dolbyVisionMetadataXml", parent: name, min: 14) try self.validate(self.dolbyVisionMetadataXml, name: "dolbyVisionMetadataXml", parent: name, pattern: "^((s3://(.*?)\\.(xml|XML))|(https?://(.*?)\\.(xml|XML)(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$") + try self.validate(self.fileInput, name: "fileInput", parent: name, max: 2048) try self.validate(self.fileInput, name: "fileInput", parent: name, pattern: "^s3://([^\\/]+\\/+)+((([^\\/]*)))|^https?://[^\\/].*[^&]$") try self.validate(self.filterStrength, name: "filterStrength", parent: name, max: 5) try self.validate(self.filterStrength, name: "filterStrength", parent: name, min: 0) @@ -8666,7 +8694,7 @@ extension MediaConvert { try self.validate(self.contentReference, name: "contentReference", parent: name, pattern: "^[a-zA-Z0-9_\\/_+=.@-]*$") try self.validate(self.credentialsSecretName, name: "credentialsSecretName", parent: name, max: 2048) try self.validate(self.credentialsSecretName, name: "credentialsSecretName", parent: name, min: 1) - try self.validate(self.credentialsSecretName, name: "credentialsSecretName", parent: name, pattern: "^(arn:(aws|aws-us-gov|aws-cn):secretsmanager:(us(-gov)?|ap|ca|cn|eu|sa)-(central|(north|south)?(east|west)?)-\\d:\\d{12}:secret:)?[a-zA-Z0-9_\\/_+=.@-]*$") + try self.validate(self.credentialsSecretName, name: "credentialsSecretName", parent: name, pattern: "^(arn:[a-z-]+:secretsmanager:[\\w-]+:\\d{12}:secret:)?[a-zA-Z0-9_\\/_+=.@-]*$") try self.validate(self.kantarLicenseId, name: "kantarLicenseId", parent: name, max: 2147483647) try self.validate(self.kantarLicenseId, name: "kantarLicenseId", parent: name, min: 0) try self.validate(self.kantarServerUrl, name: "kantarServerUrl", parent: name, pattern: "^https:\\/\\/.*.kantarmedia.*$") @@ -10362,6 +10390,7 @@ extension MediaConvert { try $0.validate(name: "\(name).captionDescriptions[]") } try self.containerSettings?.validate(name: "\(name).containerSettings") + try self.validate(self.`extension`, name: "`extension`", parent: name, max: 256) try self.validate(self.nameModifier, name: "nameModifier", parent: name, max: 256) try self.validate(self.nameModifier, name: "nameModifier", parent: name, min: 1) try self.videoDescription?.validate(name: "\(name).videoDescription") @@ -10445,6 +10474,7 @@ extension MediaConvert { public func validate(name: String) throws { try self.automatedEncodingSettings?.validate(name: "\(name).automatedEncodingSettings") + try self.validate(self.name, name: "name", parent: name, max: 2048) try self.outputGroupSettings?.validate(name: "\(name).outputGroupSettings") try self.outputs?.forEach { try $0.validate(name: "\(name).outputs[]") @@ -11909,7 +11939,7 @@ extension MediaConvert { public 
let colorMetadata: ColorMetadata? /// Use Cropping selection to specify the video area that the service will include in the output video frame. public let crop: Rectangle? - /// Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled. + /// Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion or Timecode track is enabled. public let dropFrameTimecode: DropFrameTimecode? /// Applies only if you set AFD Signaling to Fixed. Use Fixed to specify a four-bit AFD value which the service will write on all frames of this video output. public let fixedAfd: Int? @@ -11925,13 +11955,15 @@ extension MediaConvert { public let sharpness: Int? /// Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. To include timecodes in this output, set Timecode insertion to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration. In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration does. public let timecodeInsertion: VideoTimecodeInsertion? + /// To include a timecode track in your MP4 output: Choose Enabled. MediaConvert writes the timecode track in the Null Media Header box (NMHD), without any timecode text formatting information. You can also specify dropframe or non-dropframe timecode under the Drop Frame Timecode setting. To not include a timecode track: Keep the default value, Disabled. + public let timecodeTrack: TimecodeTrack? /// Find additional transcoding features under Preprocessors. Enable the features at each output individually. These features are disabled by default. public let videoPreprocessors: VideoPreprocessor? /// Use Width to define the video resolution width, in pixels, for this output. To use the same resolution as your input: Leave both Width and Height blank. To evenly scale from your input resolution: Leave Width blank and enter a value for Height. For example, if your input is 1920x1080 and you set Height to 720, your output will be 1280x720. public let width: Int? @inlinable - public init(afdSignaling: AfdSignaling? = nil, antiAlias: AntiAlias? = nil, codecSettings: VideoCodecSettings? = nil, colorMetadata: ColorMetadata? = nil, crop: Rectangle? = nil, dropFrameTimecode: DropFrameTimecode? = nil, fixedAfd: Int? = nil, height: Int? = nil, position: Rectangle? = nil, respondToAfd: RespondToAfd? = nil, scalingBehavior: ScalingBehavior? = nil, sharpness: Int? = nil, timecodeInsertion: VideoTimecodeInsertion? = nil, videoPreprocessors: VideoPreprocessor? = nil, width: Int? = nil) { + public init(afdSignaling: AfdSignaling? = nil, antiAlias: AntiAlias? 
= nil, codecSettings: VideoCodecSettings? = nil, colorMetadata: ColorMetadata? = nil, crop: Rectangle? = nil, dropFrameTimecode: DropFrameTimecode? = nil, fixedAfd: Int? = nil, height: Int? = nil, position: Rectangle? = nil, respondToAfd: RespondToAfd? = nil, scalingBehavior: ScalingBehavior? = nil, sharpness: Int? = nil, timecodeInsertion: VideoTimecodeInsertion? = nil, timecodeTrack: TimecodeTrack? = nil, videoPreprocessors: VideoPreprocessor? = nil, width: Int? = nil) { self.afdSignaling = afdSignaling self.antiAlias = antiAlias self.codecSettings = codecSettings @@ -11945,6 +11977,7 @@ extension MediaConvert { self.scalingBehavior = scalingBehavior self.sharpness = sharpness self.timecodeInsertion = timecodeInsertion + self.timecodeTrack = timecodeTrack self.videoPreprocessors = videoPreprocessors self.width = width } @@ -11978,6 +12011,7 @@ extension MediaConvert { case scalingBehavior = "scalingBehavior" case sharpness = "sharpness" case timecodeInsertion = "timecodeInsertion" + case timecodeTrack = "timecodeTrack" case videoPreprocessors = "videoPreprocessors" case width = "width" } diff --git a/Sources/Soto/Services/MediaLive/MediaLive_api.swift b/Sources/Soto/Services/MediaLive/MediaLive_api.swift index 0785de2915..a28f25c80d 100644 --- a/Sources/Soto/Services/MediaLive/MediaLive_api.swift +++ b/Sources/Soto/Services/MediaLive/MediaLive_api.swift @@ -331,7 +331,9 @@ public struct MediaLive: AWSService { /// - anywhereSettings: The Elemental Anywhere settings for this channel. /// - cdiInputSpecification: Specification of CDI inputs for this channel /// - channelClass: The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. + /// - channelEngineVersion: The desired engine version for this channel. /// - destinations: + /// - dryRun: /// - encoderSettings: /// - inputAttachments: List of input attachments for channel. /// - inputSpecification: Specification of network and file inputs for this channel @@ -348,7 +350,9 @@ public struct MediaLive: AWSService { anywhereSettings: AnywhereSettings? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, + channelEngineVersion: ChannelEngineVersionRequest? = nil, destinations: [OutputDestination]? = nil, + dryRun: Bool? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, @@ -365,7 +369,9 @@ public struct MediaLive: AWSService { anywhereSettings: anywhereSettings, cdiInputSpecification: cdiInputSpecification, channelClass: channelClass, + channelEngineVersion: channelEngineVersion, destinations: destinations, + dryRun: dryRun, encoderSettings: encoderSettings, inputAttachments: inputAttachments, inputSpecification: inputSpecification, @@ -2899,6 +2905,32 @@ public struct MediaLive: AWSService { return try await self.listTagsForResource(input, logger: logger) } + /// Retrieves an array of all the encoder engine versions that are available in this AWS account. + @Sendable + @inlinable + public func listVersions(_ input: ListVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListVersionsResponse { + try await self.client.execute( + operation: "ListVersions", + path: "/prod/versions", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves an array of all the encoder engine versions that are available in this AWS account. 
+ /// + /// Parameters: + /// - logger: Logger use during operation + @inlinable + public func listVersions( + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListVersionsResponse { + let input = ListVersionsRequest( + ) + return try await self.listVersions(input, logger: logger) + } + /// Purchase an offering and create a reservation. @Sendable @inlinable @@ -3434,8 +3466,10 @@ public struct MediaLive: AWSService { /// /// Parameters: /// - cdiInputSpecification: Specification of CDI inputs for this channel + /// - channelEngineVersion: Channel engine version for this channel /// - channelId: channel ID /// - destinations: A list of output destinations for this channel. + /// - dryRun: /// - encoderSettings: The encoder settings for this channel. /// - inputAttachments: /// - inputSpecification: Specification of network and file inputs for this channel @@ -3447,8 +3481,10 @@ public struct MediaLive: AWSService { @inlinable public func updateChannel( cdiInputSpecification: CdiInputSpecification? = nil, + channelEngineVersion: ChannelEngineVersionRequest? = nil, channelId: String, destinations: [OutputDestination]? = nil, + dryRun: Bool? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, @@ -3460,8 +3496,10 @@ public struct MediaLive: AWSService { ) async throws -> UpdateChannelResponse { let input = UpdateChannelRequest( cdiInputSpecification: cdiInputSpecification, + channelEngineVersion: channelEngineVersion, channelId: channelId, destinations: destinations, + dryRun: dryRun, encoderSettings: encoderSettings, inputAttachments: inputAttachments, inputSpecification: inputSpecification, diff --git a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift index e7399b108b..52bc2cc23f 100644 --- a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift +++ b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift @@ -418,6 +418,12 @@ extension MediaLive { public var description: String { return self.rawValue } } + public enum CmafId3Behavior: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum CmafIngestSegmentLengthUnits: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case milliseconds = "MILLISECONDS" case seconds = "SECONDS" @@ -3922,6 +3928,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -3954,11 +3962,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? 
= nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -3981,6 +3990,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -4013,6 +4023,39 @@ extension MediaLive { } } + public struct ChannelEngineVersionRequest: AWSEncodableShape { + /// The build identifier of the engine version to use for this channel. Specify 'DEFAULT' to reset to the default version. + public let version: String? + + @inlinable + public init(version: String? = nil) { + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case version = "version" + } + } + + public struct ChannelEngineVersionResponse: AWSDecodableShape { + /// The UTC time when the version expires. + @OptionalCustomCoding + public var expirationDate: Date? + /// The build identifier for this version of the channel engine. + public let version: String? + + @inlinable + public init(expirationDate: Date? = nil, version: String? = nil) { + self.expirationDate = expirationDate + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case expirationDate = "expirationDate" + case version = "version" + } + } + public struct ChannelSummary: AWSDecodableShape { /// AnywhereSettings settings for this channel. public let anywhereSettings: DescribeAnywhereSettings? @@ -4022,6 +4065,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// The engine version that you requested for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -4047,15 +4092,18 @@ extension MediaLive { public let state: ChannelState?
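A usage sketch for the engine-version additions: list the versions available to the account, then request one on an existing channel. listVersions and the updateChannel parameters appear in the hunks above; the region and channel ID are placeholders, "DEFAULT" is grounded in the ChannelEngineVersionRequest doc, and dryRun is omitted because the model ships it undocumented:

import SotoMediaLive

// Discover the encoder engine versions available to this account, then
// pin an existing channel to one. The channel ID is hypothetical.
let client = AWSClient()
let medialive = MediaLive(client: client, region: .useast1)

let available = try await medialive.listVersions()
for candidate in available.versions ?? [] {
    print(candidate.version ?? "unknown", candidate.expirationDate as Any)
}

_ = try await medialive.updateChannel(
    channelEngineVersion: .init(version: "DEFAULT"), // or a build id from listVersions
    channelId: "1234567"
)
try await client.shutdown()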
/// A collection of key-value pairs. public let tags: [String: String]? + /// The engine version that the running pipelines are using. + public let usedChannelEngineVersions: [ChannelEngineVersionResponse]? /// Settings for any VPC outputs. public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, usedChannelEngineVersions: [ChannelEngineVersionResponse]? = nil, vpc: VpcOutputSettingsDescription? = nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.id = id @@ -4068,6 +4116,7 @@ extension MediaLive { self.roleArn = roleArn self.state = state self.tags = tags + self.usedChannelEngineVersions = usedChannelEngineVersions self.vpc = vpc } @@ -4076,6 +4125,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case id = "id" @@ -4088,6 +4138,7 @@ extension MediaLive { case roleArn = "roleArn" case state = "state" case tags = "tags" + case usedChannelEngineVersions = "usedChannelEngineVersions" case vpc = "vpc" } } @@ -4281,6 +4332,10 @@ extension MediaLive { public struct CmafIngestGroupSettings: AWSEncodableShape & AWSDecodableShape { /// A HTTP destination for the tracks public let destination: OutputLocationRef? + /// Set to ENABLED to enable ID3 metadata insertion. To include metadata, you configure other parameters in the output group, or you add an ID3 action to the channel schedule. + public let id3Behavior: CmafId3Behavior? + /// Change the modifier that MediaLive automatically adds to the Streams() name that identifies an ID3 track. The default is "id3", which means the default name will be Streams(id3.cmfm). Any string you enter here will replace the "id3" string.\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters. + public let id3NameModifier: String? /// If set to passthrough, passes any KLV data from the input source to this output. 
public let klvBehavior: CmafKLVBehavior? /// Change the modifier that MediaLive automatically adds to the Streams() name that identifies a KLV track. The default is "klv", which means the default name will be Streams(klv.cmfm). Any string you enter here will replace the "klv" string.\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters. @@ -4301,8 +4356,10 @@ extension MediaLive { public let sendDelayMs: Int? @inlinable - public init(destination: OutputLocationRef? = nil, klvBehavior: CmafKLVBehavior? = nil, klvNameModifier: String? = nil, nielsenId3Behavior: CmafNielsenId3Behavior? = nil, nielsenId3NameModifier: String? = nil, scte35NameModifier: String? = nil, scte35Type: Scte35Type? = nil, segmentLength: Int? = nil, segmentLengthUnits: CmafIngestSegmentLengthUnits? = nil, sendDelayMs: Int? = nil) { + public init(destination: OutputLocationRef? = nil, id3Behavior: CmafId3Behavior? = nil, id3NameModifier: String? = nil, klvBehavior: CmafKLVBehavior? = nil, klvNameModifier: String? = nil, nielsenId3Behavior: CmafNielsenId3Behavior? = nil, nielsenId3NameModifier: String? = nil, scte35NameModifier: String? = nil, scte35Type: Scte35Type? = nil, segmentLength: Int? = nil, segmentLengthUnits: CmafIngestSegmentLengthUnits? = nil, sendDelayMs: Int? = nil) { self.destination = destination + self.id3Behavior = id3Behavior + self.id3NameModifier = id3NameModifier self.klvBehavior = klvBehavior self.klvNameModifier = klvNameModifier self.nielsenId3Behavior = nielsenId3Behavior @@ -4315,6 +4372,7 @@ extension MediaLive { } public func validate(name: String) throws { + try self.validate(self.id3NameModifier, name: "id3NameModifier", parent: name, max: 100) try self.validate(self.klvNameModifier, name: "klvNameModifier", parent: name, max: 100) try self.validate(self.nielsenId3NameModifier, name: "nielsenId3NameModifier", parent: name, max: 100) try self.validate(self.scte35NameModifier, name: "scte35NameModifier", parent: name, max: 100) @@ -4325,6 +4383,8 @@ extension MediaLive { private enum CodingKeys: String, CodingKey { case destination = "destination" + case id3Behavior = "id3Behavior" + case id3NameModifier = "id3NameModifier" case klvBehavior = "klvBehavior" case klvNameModifier = "klvNameModifier" case nielsenId3Behavior = "nielsenId3Behavior" @@ -4475,7 +4535,10 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// The desired engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionRequest? public let destinations: [OutputDestination]? + public let dryRun: Bool? public let encoderSettings: EncoderSettings? /// List of input attachments for channel. public let inputAttachments: [InputAttachment]? @@ -4500,11 +4563,13 @@ extension MediaLive { public let vpc: VpcOutputSettings? @inlinable - public init(anywhereSettings: AnywhereSettings? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceCreateSettings? = nil, name: String? = nil, requestId: String? = CreateChannelRequest.idempotencyToken(), roleArn: String? 
= nil, tags: [String: String]? = nil, vpc: VpcOutputSettings? = nil) { + public init(anywhereSettings: AnywhereSettings? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionRequest? = nil, destinations: [OutputDestination]? = nil, dryRun: Bool? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceCreateSettings? = nil, name: String? = nil, requestId: String? = CreateChannelRequest.idempotencyToken(), roleArn: String? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettings? = nil) { self.anywhereSettings = anywhereSettings self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations + self.dryRun = dryRun self.encoderSettings = encoderSettings self.inputAttachments = inputAttachments self.inputSpecification = inputSpecification @@ -4520,11 +4585,13 @@ extension MediaLive { @available(*, deprecated, message: "Members reserved have been deprecated") @inlinable - public init(anywhereSettings: AnywhereSettings? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceCreateSettings? = nil, name: String? = nil, requestId: String? = CreateChannelRequest.idempotencyToken(), reserved: String? = nil, roleArn: String? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettings? = nil) { + public init(anywhereSettings: AnywhereSettings? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionRequest? = nil, destinations: [OutputDestination]? = nil, dryRun: Bool? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceCreateSettings? = nil, name: String? = nil, requestId: String? = CreateChannelRequest.idempotencyToken(), reserved: String? = nil, roleArn: String? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettings? = nil) { self.anywhereSettings = anywhereSettings self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations + self.dryRun = dryRun self.encoderSettings = encoderSettings self.inputAttachments = inputAttachments self.inputSpecification = inputSpecification @@ -4553,7 +4620,9 @@ extension MediaLive { case anywhereSettings = "anywhereSettings" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" + case dryRun = "dryRun" case encoderSettings = "encoderSettings" case inputAttachments = "inputAttachments" case inputSpecification = "inputSpecification" @@ -5725,6 +5794,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. 
+ public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -5757,11 +5828,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -5784,6 +5856,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -6549,6 +6622,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -6581,11 +6656,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? 
= nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -6608,6 +6684,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -10083,6 +10160,24 @@ extension MediaLive { public init() {} } + public struct Id3SegmentTaggingScheduleActionSettings: AWSEncodableShape & AWSDecodableShape { + /// Complete this parameter if you want to specify the entire ID3 metadata. Enter a base64 string that contains one or more fully formed ID3 tags, according to the ID3 specification: http://id3.org/id3v2.4.0-structure + public let id3: String? + /// Complete this parameter if you want to specify only the metadata, not the entire frame. MediaLive will insert the metadata in a TXXX frame. Enter the value as plain text. You can include standard MediaLive variable data such as the current segment number. + public let tag: String? + + @inlinable + public init(id3: String? = nil, tag: String? = nil) { + self.id3 = id3 + self.tag = tag + } + + private enum CodingKeys: String, CodingKey { + case id3 = "id3" + case tag = "tag" + } + } + public struct ImmediateModeScheduleActionStartSettings: AWSEncodableShape & AWSDecodableShape { public init() {} } @@ -12136,6 +12231,24 @@ extension MediaLive { } } + public struct ListVersionsRequest: AWSEncodableShape { + public init() {} + } + + public struct ListVersionsResponse: AWSDecodableShape { + /// List of engine versions that are available for this AWS account. + public let versions: [ChannelEngineVersionResponse]? + + @inlinable + public init(versions: [ChannelEngineVersionResponse]? = nil) { + self.versions = versions + } + + private enum CodingKeys: String, CodingKey { + case versions = "versions" + } + } + public struct M2tsSettings: AWSEncodableShape & AWSDecodableShape { /// When set to drop, output audio streams will be removed from the program if the selected input audio stream is removed from the input. This allows the output audio configuration to dynamically change based on input configuration. 
If this is set to encodeSilence, all output audio streams will output encoded silence when not connected to an active input stream. public let absentInputAudioBehavior: M2tsAbsentInputAudioBehavior? @@ -14160,15 +14273,18 @@ extension MediaLive { public let activeMotionGraphicsActionName: String? /// The current URI being used for HTML5 motion graphics for this pipeline. public let activeMotionGraphicsUri: String? + /// Current engine version of the encoder for this pipeline. + public let channelEngineVersion: ChannelEngineVersionResponse? /// Pipeline ID public let pipelineId: String? @inlinable - public init(activeInputAttachmentName: String? = nil, activeInputSwitchActionName: String? = nil, activeMotionGraphicsActionName: String? = nil, activeMotionGraphicsUri: String? = nil, pipelineId: String? = nil) { + public init(activeInputAttachmentName: String? = nil, activeInputSwitchActionName: String? = nil, activeMotionGraphicsActionName: String? = nil, activeMotionGraphicsUri: String? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, pipelineId: String? = nil) { self.activeInputAttachmentName = activeInputAttachmentName self.activeInputSwitchActionName = activeInputSwitchActionName self.activeMotionGraphicsActionName = activeMotionGraphicsActionName self.activeMotionGraphicsUri = activeMotionGraphicsUri + self.channelEngineVersion = channelEngineVersion self.pipelineId = pipelineId } @@ -14177,6 +14293,7 @@ extension MediaLive { case activeInputSwitchActionName = "activeInputSwitchActionName" case activeMotionGraphicsActionName = "activeMotionGraphicsActionName" case activeMotionGraphicsUri = "activeMotionGraphicsUri" + case channelEngineVersion = "channelEngineVersion" case pipelineId = "pipelineId" } } @@ -14544,6 +14661,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -14578,11 +14697,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, maintenanceStatus: String? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? 
= nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, maintenanceStatus: String? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -14606,6 +14726,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -14794,6 +14915,8 @@ extension MediaLive { public let hlsId3SegmentTaggingSettings: HlsId3SegmentTaggingScheduleActionSettings? /// Action to insert ID3 metadata once, in HLS output groups public let hlsTimedMetadataSettings: HlsTimedMetadataScheduleActionSettings? + /// Action to insert ID3 metadata in every segment, in applicable output groups + public let id3SegmentTaggingSettings: Id3SegmentTaggingScheduleActionSettings? /// Action to prepare an input for a future immediate input switch public let inputPrepareSettings: InputPrepareScheduleActionSettings? /// Action to switch the input @@ -14820,11 +14943,14 @@ extension MediaLive { public let staticImageOutputActivateSettings: StaticImageOutputActivateScheduleActionSettings? /// Action to deactivate a static image overlay in one or more specified outputs public let staticImageOutputDeactivateSettings: StaticImageOutputDeactivateScheduleActionSettings? + /// Action to insert ID3 metadata once, in applicable output groups + public let timedMetadataSettings: TimedMetadataScheduleActionSettings? @inlinable - public init(hlsId3SegmentTaggingSettings: HlsId3SegmentTaggingScheduleActionSettings? = nil, hlsTimedMetadataSettings: HlsTimedMetadataScheduleActionSettings? = nil, inputPrepareSettings: InputPrepareScheduleActionSettings? = nil, inputSwitchSettings: InputSwitchScheduleActionSettings? = nil, motionGraphicsImageActivateSettings: MotionGraphicsActivateScheduleActionSettings? = nil, motionGraphicsImageDeactivateSettings: MotionGraphicsDeactivateScheduleActionSettings? = nil, pauseStateSettings: PauseStateScheduleActionSettings? = nil, scte35InputSettings: Scte35InputScheduleActionSettings? = nil, scte35ReturnToNetworkSettings: Scte35ReturnToNetworkScheduleActionSettings? = nil, scte35SpliceInsertSettings: Scte35SpliceInsertScheduleActionSettings? = nil, scte35TimeSignalSettings: Scte35TimeSignalScheduleActionSettings? = nil, staticImageActivateSettings: StaticImageActivateScheduleActionSettings? = nil, staticImageDeactivateSettings: StaticImageDeactivateScheduleActionSettings? = nil, staticImageOutputActivateSettings: StaticImageOutputActivateScheduleActionSettings? = nil, staticImageOutputDeactivateSettings: StaticImageOutputDeactivateScheduleActionSettings? = nil) { + public init(hlsId3SegmentTaggingSettings: HlsId3SegmentTaggingScheduleActionSettings? = nil, hlsTimedMetadataSettings: HlsTimedMetadataScheduleActionSettings? = nil, id3SegmentTaggingSettings: Id3SegmentTaggingScheduleActionSettings? 
= nil, inputPrepareSettings: InputPrepareScheduleActionSettings? = nil, inputSwitchSettings: InputSwitchScheduleActionSettings? = nil, motionGraphicsImageActivateSettings: MotionGraphicsActivateScheduleActionSettings? = nil, motionGraphicsImageDeactivateSettings: MotionGraphicsDeactivateScheduleActionSettings? = nil, pauseStateSettings: PauseStateScheduleActionSettings? = nil, scte35InputSettings: Scte35InputScheduleActionSettings? = nil, scte35ReturnToNetworkSettings: Scte35ReturnToNetworkScheduleActionSettings? = nil, scte35SpliceInsertSettings: Scte35SpliceInsertScheduleActionSettings? = nil, scte35TimeSignalSettings: Scte35TimeSignalScheduleActionSettings? = nil, staticImageActivateSettings: StaticImageActivateScheduleActionSettings? = nil, staticImageDeactivateSettings: StaticImageDeactivateScheduleActionSettings? = nil, staticImageOutputActivateSettings: StaticImageOutputActivateScheduleActionSettings? = nil, staticImageOutputDeactivateSettings: StaticImageOutputDeactivateScheduleActionSettings? = nil, timedMetadataSettings: TimedMetadataScheduleActionSettings? = nil) { self.hlsId3SegmentTaggingSettings = hlsId3SegmentTaggingSettings self.hlsTimedMetadataSettings = hlsTimedMetadataSettings + self.id3SegmentTaggingSettings = id3SegmentTaggingSettings self.inputPrepareSettings = inputPrepareSettings self.inputSwitchSettings = inputSwitchSettings self.motionGraphicsImageActivateSettings = motionGraphicsImageActivateSettings @@ -14838,6 +14964,7 @@ extension MediaLive { self.staticImageDeactivateSettings = staticImageDeactivateSettings self.staticImageOutputActivateSettings = staticImageOutputActivateSettings self.staticImageOutputDeactivateSettings = staticImageOutputDeactivateSettings + self.timedMetadataSettings = timedMetadataSettings } public func validate(name: String) throws { @@ -14854,6 +14981,7 @@ extension MediaLive { private enum CodingKeys: String, CodingKey { case hlsId3SegmentTaggingSettings = "hlsId3SegmentTaggingSettings" case hlsTimedMetadataSettings = "hlsTimedMetadataSettings" + case id3SegmentTaggingSettings = "id3SegmentTaggingSettings" case inputPrepareSettings = "inputPrepareSettings" case inputSwitchSettings = "inputSwitchSettings" case motionGraphicsImageActivateSettings = "motionGraphicsImageActivateSettings" @@ -14867,6 +14995,7 @@ extension MediaLive { case staticImageDeactivateSettings = "staticImageDeactivateSettings" case staticImageOutputActivateSettings = "staticImageOutputActivateSettings" case staticImageOutputDeactivateSettings = "staticImageOutputDeactivateSettings" + case timedMetadataSettings = "timedMetadataSettings" } } @@ -15504,6 +15633,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -15536,11 +15667,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? 
= nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -15563,6 +15695,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -16269,6 +16402,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -16301,11 +16436,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? 
= nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -16328,6 +16464,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -16624,6 +16761,20 @@ extension MediaLive { } } + public struct TimedMetadataScheduleActionSettings: AWSEncodableShape & AWSDecodableShape { + /// Enter a base64 string that contains one or more fully formed ID3 tags. See the ID3 specification: http://id3.org/id3v2.4.0-structure + public let id3: String? + + @inlinable + public init(id3: String? = nil) { + self.id3 = id3 + } + + private enum CodingKeys: String, CodingKey { + case id3 = "id3" + } + } + public struct TransferInputDeviceRequest: AWSEncodableShape { /// The unique ID of this input device. For example, hd-123456789abcdef. public let inputDeviceId: String @@ -16925,10 +17076,13 @@ extension MediaLive { public struct UpdateChannelRequest: AWSEncodableShape { /// Specification of CDI inputs for this channel public let cdiInputSpecification: CdiInputSpecification? + /// Channel engine version for this channel + public let channelEngineVersion: ChannelEngineVersionRequest? /// channel ID public let channelId: String /// A list of output destinations for this channel. public let destinations: [OutputDestination]? + public let dryRun: Bool? /// The encoder settings for this channel. public let encoderSettings: EncoderSettings? public let inputAttachments: [InputAttachment]? @@ -16944,10 +17098,12 @@ extension MediaLive { public let roleArn: String? @inlinable - public init(cdiInputSpecification: CdiInputSpecification? = nil, channelId: String, destinations: [OutputDestination]? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceUpdateSettings? = nil, name: String? = nil, roleArn: String? = nil) { + public init(cdiInputSpecification: CdiInputSpecification? = nil, channelEngineVersion: ChannelEngineVersionRequest? = nil, channelId: String, destinations: [OutputDestination]? = nil, dryRun: Bool? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceUpdateSettings? = nil, name: String? = nil, roleArn: String?
= nil) { self.cdiInputSpecification = cdiInputSpecification + self.channelEngineVersion = channelEngineVersion self.channelId = channelId self.destinations = destinations + self.dryRun = dryRun self.encoderSettings = encoderSettings self.inputAttachments = inputAttachments self.inputSpecification = inputSpecification @@ -16961,8 +17117,10 @@ extension MediaLive { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) try container.encodeIfPresent(self.cdiInputSpecification, forKey: .cdiInputSpecification) + try container.encodeIfPresent(self.channelEngineVersion, forKey: .channelEngineVersion) request.encodePath(self.channelId, key: "ChannelId") try container.encodeIfPresent(self.destinations, forKey: .destinations) + try container.encodeIfPresent(self.dryRun, forKey: .dryRun) try container.encodeIfPresent(self.encoderSettings, forKey: .encoderSettings) try container.encodeIfPresent(self.inputAttachments, forKey: .inputAttachments) try container.encodeIfPresent(self.inputSpecification, forKey: .inputSpecification) @@ -16985,7 +17143,9 @@ extension MediaLive { private enum CodingKeys: String, CodingKey { case cdiInputSpecification = "cdiInputSpecification" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" + case dryRun = "dryRun" case encoderSettings = "encoderSettings" case inputAttachments = "inputAttachments" case inputSpecification = "inputSpecification" diff --git a/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift b/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift index 105791400d..1fe6a353ab 100644 --- a/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift +++ b/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS NetworkFirewall service. /// -/// This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. To access Network Firewall using the REST API endpoint: https://network-firewall..amazonaws.com Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. 
The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints. +/// This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. To view the complete list of Amazon Web Services Regions where Network Firewall is available, see Service endpoints and quotas in the Amazon Web Services General Reference. To access Network Firewall using the IPv4 REST API endpoint: https://network-firewall..amazonaws.com To access Network Firewall using the Dualstack (IPv4 and IPv6) REST API endpoint: https://network-firewall..aws.api Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. For descriptions of Network Firewall features, including step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic.
Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints. public struct NetworkFirewall: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/Notifications/Notifications_api.swift b/Sources/Soto/Services/Notifications/Notifications_api.swift index 7474cb3d8c..d859017ec5 100644 --- a/Sources/Soto/Services/Notifications/Notifications_api.swift +++ b/Sources/Soto/Services/Notifications/Notifications_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS Notifications service. /// -/// The AWS User Notifications API Reference provides descriptions, API request parameters, and the JSON response for each of the User Notification API actions. User Notification control APIs are currently available in US East (Virginia) - us-east-1. GetNotificationEvent +/// The Amazon Web Services User Notifications API Reference provides descriptions, API request parameters, and the JSON response for each of the User Notification API actions. User Notification control plane APIs are currently available in US East (Virginia) - us-east-1. GetNotificationEvent /// and ListNotificationEvents APIs are currently available in /// commercial partition Regions and only return notifications stored in the same Region in which they're called. The User Notifications console can only be used in US East (Virginia). Your data however, is stored in each Region chosen as a /// notification hub in addition to US East (Virginia). 
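Stepping back to the MediaLive hunks above: together they add `channelEngineVersion` and `dryRun` to the create/update channel requests, the `ListVersions` shapes, and the new ID3 schedule-action settings. A minimal usage sketch follows, assuming the `listVersions` operation that the generator emits alongside the `ListVersions` shapes and a `version` member on `ChannelEngineVersionRequest`/`ChannelEngineVersionResponse` (neither appears in this diff); the channel ID and tag text are placeholders:

```swift
import SotoMediaLive

let client = AWSClient() // default credential chain (Soto 7 initializer)
let mediaLive = MediaLive(client: client, region: .useast1)

// Discover which encoder engine versions this account can request.
let available = try await mediaLive.listVersions(ListVersionsRequest())

// Pin the channel to a requested engine version, validating first:
// dryRun asks MediaLive to check the request without applying it.
let update = UpdateChannelRequest(
    channelEngineVersion: ChannelEngineVersionRequest(version: available.versions?.first?.version), // field name assumed
    channelId: "1234567", // placeholder channel ID
    dryRun: true
)
_ = try await mediaLive.updateChannel(update)

// The new schedule-action shapes: tag every segment, or insert one ID3 frame.
let everySegment = ScheduleActionSettings(
    id3SegmentTaggingSettings: Id3SegmentTaggingScheduleActionSettings(tag: "example-tag") // placeholder tag text
)
let once = ScheduleActionSettings(
    timedMetadataSettings: TimedMetadataScheduleActionSettings(id3: "<base64 ID3 tag>") // placeholder payload
)
_ = (everySegment, once)

try await client.shutdown()
```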
@@ -93,6 +93,7 @@ public struct Notifications: AWSService { "ap-southeast-3": "notifications.ap-southeast-3.api.aws", "ap-southeast-4": "notifications.ap-southeast-4.api.aws", "ap-southeast-5": "notifications.ap-southeast-5.api.aws", + "ap-southeast-7": "notifications.ap-southeast-7.api.aws", "ca-central-1": "notifications.ca-central-1.api.aws", "ca-west-1": "notifications.ca-west-1.api.aws", "cn-north-1": "notifications.cn-north-1.api.amazonwebservices.com.cn", @@ -108,6 +109,7 @@ public struct Notifications: AWSService { "il-central-1": "notifications.il-central-1.api.aws", "me-central-1": "notifications.me-central-1.api.aws", "me-south-1": "notifications.me-south-1.api.aws", + "mx-central-1": "notifications.mx-central-1.api.aws", "sa-east-1": "notifications.sa-east-1.api.aws", "us-east-1": "notifications.us-east-1.api.aws", "us-east-2": "notifications.us-east-2.api.aws", @@ -133,6 +135,7 @@ public struct Notifications: AWSService { "ap-southeast-3": "notifications-fips.ap-southeast-3.api.aws", "ap-southeast-4": "notifications-fips.ap-southeast-4.api.aws", "ap-southeast-5": "notifications-fips.ap-southeast-5.api.aws", + "ap-southeast-7": "notifications-fips.ap-southeast-7.api.aws", "ca-central-1": "notifications-fips.ca-central-1.api.aws", "ca-west-1": "notifications-fips.ca-west-1.api.aws", "cn-north-1": "notifications-fips.cn-north-1.api.amazonwebservices.com.cn", @@ -148,6 +151,7 @@ public struct Notifications: AWSService { "il-central-1": "notifications-fips.il-central-1.api.aws", "me-central-1": "notifications-fips.me-central-1.api.aws", "me-south-1": "notifications-fips.me-south-1.api.aws", + "mx-central-1": "notifications-fips.mx-central-1.api.aws", "sa-east-1": "notifications-fips.sa-east-1.api.aws", "us-east-1": "notifications-fips.us-east-1.api.aws", "us-east-2": "notifications-fips.us-east-2.api.aws", @@ -160,8 +164,8 @@ public struct Notifications: AWSService { // MARK: API Calls - /// Associates a delivery Channel with a particular NotificationConfiguration. Supported Channels include AWS Chatbot, - /// the AWS Console Mobile Application, and emails (notifications-contacts). + /// Associates a delivery Channel with a particular NotificationConfiguration. Supported Channels include Chatbot, + /// the Console Mobile Application, and emails (notifications-contacts). @Sendable @inlinable public func associateChannel(_ input: AssociateChannelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateChannelResponse { @@ -174,11 +178,11 @@ public struct Notifications: AWSService { logger: logger ) } - /// Associates a delivery Channel with a particular NotificationConfiguration. Supported Channels include AWS Chatbot, - /// the AWS Console Mobile Application, and emails (notifications-contacts). + /// Associates a delivery Channel with a particular NotificationConfiguration. Supported Channels include Chatbot, + /// the Console Mobile Application, and emails (notifications-contacts). /// /// Parameters: - /// - arn: The Amazon Resource Name (ARN) of the Channel to associate with the NotificationConfiguration. Supported ARNs include AWS Chatbot, the Console Mobile Application, and notifications-contacts. + /// - arn: The Amazon Resource Name (ARN) of the Channel to associate with the NotificationConfiguration. Supported ARNs include Chatbot, the Console Mobile Application, and notifications-contacts. /// - notificationConfigurationArn: The ARN of the NotificationConfiguration to associate with the Channel. 
/// - logger: Logger use during operation @inlinable @@ -194,7 +198,71 @@ public struct Notifications: AWSService { return try await self.associateChannel(input, logger: logger) } - /// Creates an EventRule that is associated with a specified Notification Configuration. + /// Associates an Account Contact with a particular ManagedNotificationConfiguration. + @Sendable + @inlinable + public func associateManagedNotificationAccountContact(_ input: AssociateManagedNotificationAccountContactRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateManagedNotificationAccountContactResponse { + try await self.client.execute( + operation: "AssociateManagedNotificationAccountContact", + path: "/contacts/associate-managed-notification/{contactIdentifier}", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Associates an Account Contact with a particular ManagedNotificationConfiguration. + /// + /// Parameters: + /// - contactIdentifier: A unique value of an Account Contact Type to associate with the ManagedNotificationConfiguration. + /// - managedNotificationConfigurationArn: The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to associate with the Account Contact. + /// - logger: Logger use during operation + @inlinable + public func associateManagedNotificationAccountContact( + contactIdentifier: AccountContactType, + managedNotificationConfigurationArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> AssociateManagedNotificationAccountContactResponse { + let input = AssociateManagedNotificationAccountContactRequest( + contactIdentifier: contactIdentifier, + managedNotificationConfigurationArn: managedNotificationConfigurationArn + ) + return try await self.associateManagedNotificationAccountContact(input, logger: logger) + } + + /// Associates an additional Channel with a particular ManagedNotificationConfiguration. Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts). + @Sendable + @inlinable + public func associateManagedNotificationAdditionalChannel(_ input: AssociateManagedNotificationAdditionalChannelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateManagedNotificationAdditionalChannelResponse { + try await self.client.execute( + operation: "AssociateManagedNotificationAdditionalChannel", + path: "/channels/associate-managed-notification/{channelArn}", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Associates an additional Channel with a particular ManagedNotificationConfiguration. Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts). + /// + /// Parameters: + /// - channelArn: The Amazon Resource Name (ARN) of the Channel to associate with the ManagedNotificationConfiguration. Supported ARNs include Chatbot, the Console Mobile Application, and email (notifications-contacts). + /// - managedNotificationConfigurationArn: The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to associate with the additional Channel. 
+ /// - logger: Logger use during operation + @inlinable + public func associateManagedNotificationAdditionalChannel( + channelArn: String, + managedNotificationConfigurationArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> AssociateManagedNotificationAdditionalChannelResponse { + let input = AssociateManagedNotificationAdditionalChannelRequest( + channelArn: channelArn, + managedNotificationConfigurationArn: managedNotificationConfigurationArn + ) + return try await self.associateManagedNotificationAdditionalChannel(input, logger: logger) + } + + /// Creates an EventRule that is associated with a specified NotificationConfiguration. @Sendable @inlinable public func createEventRule(_ input: CreateEventRuleRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEventRuleResponse { @@ -207,14 +275,14 @@ public struct Notifications: AWSService { logger: logger ) } - /// Creates an EventRule that is associated with a specified Notification Configuration. + /// Creates an EventRule that is associated with a specified NotificationConfiguration. /// /// Parameters: /// - eventPattern: An additional event pattern used to further filter the events this EventRule receives. For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide. - /// - eventType: The event type to match. Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// - eventType: The event type to match. Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and Amazon CloudWatch Alarm State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. /// - notificationConfigurationArn: The Amazon Resource Name (ARN) of the NotificationConfiguration associated with this EventRule. - /// - regions: A list of AWS Regions that send events to this EventRule. - /// - source: The matched event source. Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// - regions: A list of Amazon Web Services Regions that send events to this EventRule. + /// - source: The matched event source. Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. /// - logger: Logger use during operation @inlinable public func createEventRule( @@ -251,7 +319,7 @@ public struct Notifications: AWSService { /// Creates a new NotificationConfiguration. /// /// Parameters: - /// - aggregationDuration: The aggregation preference of the NotificationConfiguration. Values: LONG Aggregate notifications for long periods of time (12 hours). SHORT Aggregate notifications for short periods of time (5 minutes). NONE Don't aggregate notifications. No delay in delivery. + /// - aggregationDuration: The aggregation preference of the NotificationConfiguration. Values: LONG Aggregate notifications for long periods of time (12 hours). SHORT Aggregate notifications for short periods of time (5 minutes). 
NONE Don't aggregate notifications. /// - description: The description of the NotificationConfiguration. /// - name: The name of the NotificationConfiguration. Supports RFC 3986's unreserved characters. /// - tags: A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs. @@ -331,7 +399,7 @@ public struct Notifications: AWSService { return try await self.deleteNotificationConfiguration(input, logger: logger) } - /// Deregisters a NotificationHub in the specified Region. You can't deregister the last NotificationHub in the account. NotificationEvents stored in the deregistered NotificationHub are no longer be visible. Recreating a new NotificationHub in the same Region restores access to those NotificationEvents. + /// Deregisters a NotificationConfiguration in the specified Region. You can't deregister the last NotificationHub in the account. NotificationEvents stored in the deregistered NotificationConfiguration are no longer visible. Recreating a new NotificationConfiguration in the same Region restores access to those NotificationEvents. @Sendable @inlinable public func deregisterNotificationHub(_ input: DeregisterNotificationHubRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeregisterNotificationHubResponse { @@ -344,10 +412,10 @@ public struct Notifications: AWSService { logger: logger ) } - /// Deregisters a NotificationHub in the specified Region. You can't deregister the last NotificationHub in the account. NotificationEvents stored in the deregistered NotificationHub are no longer be visible. Recreating a new NotificationHub in the same Region restores access to those NotificationEvents. + /// Deregisters a NotificationConfiguration in the specified Region. You can't deregister the last NotificationHub in the account. NotificationEvents stored in the deregistered NotificationConfiguration are no longer visible. Recreating a new NotificationConfiguration in the same Region restores access to those NotificationEvents. /// /// Parameters: - /// - notificationHubRegion: The NotificationHub Region. + /// - notificationHubRegion: The NotificationConfiguration Region. /// - logger: Logger use during operation @inlinable public func deregisterNotificationHub( @@ -360,7 +428,33 @@ public struct Notifications: AWSService { return try await self.deregisterNotificationHub(input, logger: logger) } - /// Disassociates a Channel from a specified NotificationConfiguration. Supported Channels include AWS Chatbot, the AWS Console Mobile Application, and emails (notifications-contacts). + /// Disables service trust between User Notifications and Amazon Web Services Organizations. + @Sendable + @inlinable + public func disableNotificationsAccessForOrganization(_ input: DisableNotificationsAccessForOrganizationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisableNotificationsAccessForOrganizationResponse { + try await self.client.execute( + operation: "DisableNotificationsAccessForOrganization", + path: "/organization/access", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Disables service trust between User Notifications and Amazon Web Services Organizations.
+ /// + /// Parameters: + /// - logger: Logger use during operation + @inlinable + public func disableNotificationsAccessForOrganization( + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DisableNotificationsAccessForOrganizationResponse { + let input = DisableNotificationsAccessForOrganizationRequest( + ) + return try await self.disableNotificationsAccessForOrganization(input, logger: logger) + } + + /// Disassociates a Channel from a specified NotificationConfiguration. Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts). @Sendable @inlinable public func disassociateChannel(_ input: DisassociateChannelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateChannelResponse { @@ -373,7 +467,7 @@ public struct Notifications: AWSService { logger: logger ) } - /// Disassociates a Channel from a specified NotificationConfiguration. Supported Channels include AWS Chatbot, the AWS Console Mobile Application, and emails (notifications-contacts). + /// Disassociates a Channel from a specified NotificationConfiguration. Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts). /// /// Parameters: /// - arn: The Amazon Resource Name (ARN) of the Channel to disassociate. @@ -392,6 +486,96 @@ public struct Notifications: AWSService { return try await self.disassociateChannel(input, logger: logger) } + /// Disassociates an Account Contact with a particular ManagedNotificationConfiguration. + @Sendable + @inlinable + public func disassociateManagedNotificationAccountContact(_ input: DisassociateManagedNotificationAccountContactRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateManagedNotificationAccountContactResponse { + try await self.client.execute( + operation: "DisassociateManagedNotificationAccountContact", + path: "/contacts/disassociate-managed-notification/{contactIdentifier}", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Disassociates an Account Contact with a particular ManagedNotificationConfiguration. + /// + /// Parameters: + /// - contactIdentifier: The unique value of an Account Contact Type to associate with the ManagedNotificationConfiguration. + /// - managedNotificationConfigurationArn: The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to associate with the Account Contact. + /// - logger: Logger use during operation + @inlinable + public func disassociateManagedNotificationAccountContact( + contactIdentifier: AccountContactType, + managedNotificationConfigurationArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DisassociateManagedNotificationAccountContactResponse { + let input = DisassociateManagedNotificationAccountContactRequest( + contactIdentifier: contactIdentifier, + managedNotificationConfigurationArn: managedNotificationConfigurationArn + ) + return try await self.disassociateManagedNotificationAccountContact(input, logger: logger) + } + + /// Disassociates an additional Channel from a particular ManagedNotificationConfiguration. Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts). 
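The managed-notification operations added in these hunks pair naturally: enable service trust with Organizations, then attach Account Contacts and extra delivery Channels to a ManagedNotificationConfiguration. A sketch under those assumptions; both ARNs are hypothetical placeholders and the `AccountContactType` case name is assumed from the service model, which this diff does not show:

```swift
import SotoNotifications

let client = AWSClient()
let notifications = Notifications(client: client, region: .useast1)

// Placeholder ARNs for illustration only.
let managedConfigArn = "arn:aws:notifications::123456789012:managed-notification-configuration/example"
let channelArn = "arn:aws:chatbot::123456789012:chat-configuration/slack-channel/example"

// Service trust with Organizations must be in place before managed
// notifications can fan out across the organization.
_ = try await notifications.enableNotificationsAccessForOrganization()

// Route the managed notifications to an account contact and an extra channel.
_ = try await notifications.associateManagedNotificationAccountContact(
    contactIdentifier: .accountPrimary, // case name assumed from the AccountContactType model
    managedNotificationConfigurationArn: managedConfigArn
)
_ = try await notifications.associateManagedNotificationAdditionalChannel(
    channelArn: channelArn,
    managedNotificationConfigurationArn: managedConfigArn
)

try await client.shutdown()
```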
+ @Sendable + @inlinable + public func disassociateManagedNotificationAdditionalChannel(_ input: DisassociateManagedNotificationAdditionalChannelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateManagedNotificationAdditionalChannelResponse { + try await self.client.execute( + operation: "DisassociateManagedNotificationAdditionalChannel", + path: "/channels/disassociate-managed-notification/{channelArn}", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Disassociates an additional Channel from a particular ManagedNotificationConfiguration. Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts). + /// + /// Parameters: + /// - channelArn: The Amazon Resource Name (ARN) of the Channel to associate with the ManagedNotificationConfiguration. + /// - managedNotificationConfigurationArn: The Amazon Resource Name (ARN) of the Managed Notification Configuration to associate with the additional Channel. + /// - logger: Logger use during operation + @inlinable + public func disassociateManagedNotificationAdditionalChannel( + channelArn: String, + managedNotificationConfigurationArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DisassociateManagedNotificationAdditionalChannelResponse { + let input = DisassociateManagedNotificationAdditionalChannelRequest( + channelArn: channelArn, + managedNotificationConfigurationArn: managedNotificationConfigurationArn + ) + return try await self.disassociateManagedNotificationAdditionalChannel(input, logger: logger) + } + + /// Enables service trust between User Notifications and Amazon Web Services Organizations. + @Sendable + @inlinable + public func enableNotificationsAccessForOrganization(_ input: EnableNotificationsAccessForOrganizationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> EnableNotificationsAccessForOrganizationResponse { + try await self.client.execute( + operation: "EnableNotificationsAccessForOrganization", + path: "/organization/access", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Enables service trust between User Notifications and Amazon Web Services Organizations. + /// + /// Parameters: + /// - logger: Logger use during operation + @inlinable + public func enableNotificationsAccessForOrganization( + logger: Logger = AWSClient.loggingDisabled + ) async throws -> EnableNotificationsAccessForOrganizationResponse { + let input = EnableNotificationsAccessForOrganizationRequest( + ) + return try await self.enableNotificationsAccessForOrganization(input, logger: logger) + } + /// Returns a specified EventRule. @Sendable @inlinable @@ -421,6 +605,99 @@ public struct Notifications: AWSService { return try await self.getEventRule(input, logger: logger) } + /// Returns the child event of a specific given ManagedNotificationEvent. + @Sendable + @inlinable + public func getManagedNotificationChildEvent(_ input: GetManagedNotificationChildEventRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetManagedNotificationChildEventResponse { + try await self.client.execute( + operation: "GetManagedNotificationChildEvent", + path: "/managed-notification-child-events/{arn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns the child event of a specific given ManagedNotificationEvent. 
+ /// + /// Parameters: + /// - arn: The Amazon Resource Name (ARN) of the ManagedNotificationChildEvent to return. + /// - locale: The locale code of the language used for the retrieved ManagedNotificationChildEvent. The default locale is English en_US. + /// - logger: Logger use during operation + @inlinable + public func getManagedNotificationChildEvent( + arn: String, + locale: LocaleCode? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetManagedNotificationChildEventResponse { + let input = GetManagedNotificationChildEventRequest( + arn: arn, + locale: locale + ) + return try await self.getManagedNotificationChildEvent(input, logger: logger) + } + + /// Returns a specified ManagedNotificationConfiguration. + @Sendable + @inlinable + public func getManagedNotificationConfiguration(_ input: GetManagedNotificationConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetManagedNotificationConfigurationResponse { + try await self.client.execute( + operation: "GetManagedNotificationConfiguration", + path: "/managed-notification-configurations/{arn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a specified ManagedNotificationConfiguration. + /// + /// Parameters: + /// - arn: The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to return. + /// - logger: Logger use during operation + @inlinable + public func getManagedNotificationConfiguration( + arn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetManagedNotificationConfigurationResponse { + let input = GetManagedNotificationConfigurationRequest( + arn: arn + ) + return try await self.getManagedNotificationConfiguration(input, logger: logger) + } + + /// Returns a specified ManagedNotificationEvent. + @Sendable + @inlinable + public func getManagedNotificationEvent(_ input: GetManagedNotificationEventRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetManagedNotificationEventResponse { + try await self.client.execute( + operation: "GetManagedNotificationEvent", + path: "/managed-notification-events/{arn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a specified ManagedNotificationEvent. + /// + /// Parameters: + /// - arn: The Amazon Resource Name (ARN) of the ManagedNotificationEvent to return. + /// - locale: The locale code of the language used for the retrieved ManagedNotificationEvent. The default locale is English (en_US). + /// - logger: Logger use during operation + @inlinable + public func getManagedNotificationEvent( + arn: String, + locale: LocaleCode? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetManagedNotificationEventResponse { + let input = GetManagedNotificationEventRequest( + arn: arn, + locale: locale + ) + return try await self.getManagedNotificationEvent(input, logger: logger) + } + /// Returns a specified NotificationConfiguration. @Sendable @inlinable @@ -452,7 +729,7 @@ public struct Notifications: AWSService { /// Returns a specified NotificationEvent. User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. GetNotificationEvent only returns notifications stored in the same Region in which the action is called. /// User Notifications doesn't backfill notifications to new Regions selected as notification hubs. 
For this reason, we recommend that you make calls in your oldest registered notification hub. - /// For more information, see Notification hubs in the AWS User Notifications User Guide. + /// For more information, see Notification hubs in the Amazon Web Services User Notifications User Guide. @Sendable @inlinable public func getNotificationEvent(_ input: GetNotificationEventRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetNotificationEventResponse { @@ -467,7 +744,7 @@ public struct Notifications: AWSService { } /// Returns a specified NotificationEvent. User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. GetNotificationEvent only returns notifications stored in the same Region in which the action is called. /// User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. - /// For more information, see Notification hubs in the AWS User Notifications User Guide. + /// For more information, see Notification hubs in the Amazon Web Services User Notifications User Guide. /// /// Parameters: /// - arn: The Amazon Resource Name (ARN) of the NotificationEvent to return. @@ -486,6 +763,32 @@ public struct Notifications: AWSService { return try await self.getNotificationEvent(input, logger: logger) } + /// Returns the AccessStatus of Service Trust Enablement for User Notifications and Amazon Web Services Organizations. + @Sendable + @inlinable + public func getNotificationsAccessForOrganization(_ input: GetNotificationsAccessForOrganizationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetNotificationsAccessForOrganizationResponse { + try await self.client.execute( + operation: "GetNotificationsAccessForOrganization", + path: "/organization/access", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns the AccessStatus of Service Trust Enablement for User Notifications and Amazon Web Services Organizations. + /// + /// Parameters: + /// - logger: Logger use during operation + @inlinable + public func getNotificationsAccessForOrganization( + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetNotificationsAccessForOrganizationResponse { + let input = GetNotificationsAccessForOrganizationRequest( + ) + return try await self.getNotificationsAccessForOrganization(input, logger: logger) + } + /// Returns a list of Channels for a NotificationConfiguration. @Sendable @inlinable @@ -556,6 +859,176 @@ public struct Notifications: AWSService { return try await self.listEventRules(input, logger: logger) } + /// Returns a list of Account contacts and Channels associated with a ManagedNotificationConfiguration, in paginated format. + @Sendable + @inlinable + public func listManagedNotificationChannelAssociations(_ input: ListManagedNotificationChannelAssociationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListManagedNotificationChannelAssociationsResponse { + try await self.client.execute( + operation: "ListManagedNotificationChannelAssociations", + path: "/channels/list-managed-notification-channel-associations", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a list of Account contacts and Channels associated with a ManagedNotificationConfiguration, in paginated format. 
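Alongside the association calls, the new read operations let a caller confirm the organization-level access status and fetch a single managed event. A short sketch; the event ARN is a placeholder, and the responses are printed whole rather than assuming field names this diff does not show:

```swift
import SotoNotifications

let client = AWSClient()
let notifications = Notifications(client: client, region: .useast1)

// Check whether service trust with Organizations is currently enabled.
let access = try await notifications.getNotificationsAccessForOrganization()
print(access)

// Fetch one managed notification event; locale defaults to English (en_US).
let event = try await notifications.getManagedNotificationEvent(
    arn: "arn:aws:notifications::123456789012:managed-notification-event/example" // placeholder ARN
)
print(event)

try await client.shutdown()
```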
+ /// + /// Parameters: + /// - managedNotificationConfigurationArn: The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to match. + /// - maxResults: The maximum number of results to be returned in this call. Defaults to 20. + /// - nextToken: The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationChannelAssociations call. + /// - logger: Logger use during operation + @inlinable + public func listManagedNotificationChannelAssociations( + managedNotificationConfigurationArn: String, + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListManagedNotificationChannelAssociationsResponse { + let input = ListManagedNotificationChannelAssociationsRequest( + managedNotificationConfigurationArn: managedNotificationConfigurationArn, + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listManagedNotificationChannelAssociations(input, logger: logger) + } + + /// Returns a list of ManagedNotificationChildEvents for a specified aggregate ManagedNotificationEvent, ordered by creation time in reverse chronological order (newest first). + @Sendable + @inlinable + public func listManagedNotificationChildEvents(_ input: ListManagedNotificationChildEventsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListManagedNotificationChildEventsResponse { + try await self.client.execute( + operation: "ListManagedNotificationChildEvents", + path: "/list-managed-notification-child-events/{aggregateManagedNotificationEventArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a list of ManagedNotificationChildEvents for a specified aggregate ManagedNotificationEvent, ordered by creation time in reverse chronological order (newest first). + /// + /// Parameters: + /// - aggregateManagedNotificationEventArn: The Amazon Resource Name (ARN) of the ManagedNotificationEvent. + /// - endTime: Latest time of events to return from this call. + /// - locale: The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US). + /// - maxResults: The maximum number of results to be returned in this call. Defaults to 20. + /// - nextToken: The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationChildEvents call. Next token uses Base64 encoding. + /// - organizationalUnitId: The identifier of the Amazon Web Services Organizations organizational unit (OU) associated with the Managed Notification Child Events. + /// - relatedAccount: The Amazon Web Services account ID associated with the Managed Notification Child Events. + /// - startTime: The earliest time of events to return from this call. + /// - logger: Logger use during operation + @inlinable + public func listManagedNotificationChildEvents( + aggregateManagedNotificationEventArn: String, + endTime: Date? = nil, + locale: LocaleCode? = nil, + maxResults: Int? = nil, + nextToken: String? = nil, + organizationalUnitId: String? = nil, + relatedAccount: String? = nil, + startTime: Date?
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListManagedNotificationChildEventsResponse { + let input = ListManagedNotificationChildEventsRequest( + aggregateManagedNotificationEventArn: aggregateManagedNotificationEventArn, + endTime: endTime, + locale: locale, + maxResults: maxResults, + nextToken: nextToken, + organizationalUnitId: organizationalUnitId, + relatedAccount: relatedAccount, + startTime: startTime + ) + return try await self.listManagedNotificationChildEvents(input, logger: logger) + } + + /// Returns a list of Managed Notification Configurations according to specified filters, ordered by creation time in reverse chronological order (newest first). + @Sendable + @inlinable + public func listManagedNotificationConfigurations(_ input: ListManagedNotificationConfigurationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListManagedNotificationConfigurationsResponse { + try await self.client.execute( + operation: "ListManagedNotificationConfigurations", + path: "/managed-notification-configurations", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a list of Managed Notification Configurations according to specified filters, ordered by creation time in reverse chronological order (newest first). + /// + /// Parameters: + /// - channelIdentifier: The identifier or ARN of the notification channel to filter configurations by. + /// - maxResults: The maximum number of results to be returned in this call. Defaults to 20. + /// - nextToken: The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationConfigurations call. Next token uses Base64 encoding. + /// - logger: Logger use during operation + @inlinable + public func listManagedNotificationConfigurations( + channelIdentifier: String? = nil, + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListManagedNotificationConfigurationsResponse { + let input = ListManagedNotificationConfigurationsRequest( + channelIdentifier: channelIdentifier, + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listManagedNotificationConfigurations(input, logger: logger) + } + + /// Returns a list of Managed Notification Events according to specified filters, ordered by creation time in reverse chronological order (newest first). + @Sendable + @inlinable + public func listManagedNotificationEvents(_ input: ListManagedNotificationEventsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListManagedNotificationEventsResponse { + try await self.client.execute( + operation: "ListManagedNotificationEvents", + path: "/managed-notification-events", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a list of Managed Notification Events according to specified filters, ordered by creation time in reverse chronological order (newest first). + /// + /// Parameters: + /// - endTime: Latest time of events to return from this call. + /// - locale: The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US). + /// - maxResults: The maximum number of results to be returned in this call. Defaults to 20. + /// - nextToken: The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationEvents call. Next token uses Base64 encoding.
+ /// - organizationalUnitId: The Organizational Unit Id that an Amazon Web Services account belongs to. + /// - relatedAccount: The Amazon Web Services account ID associated with the Managed Notification Events. + /// - source: The Amazon Web Services service the event originates from. For example, aws.cloudwatch. + /// - startTime: The earliest time of events to return from this call. + /// - logger: Logger use during operation + @inlinable + public func listManagedNotificationEvents( + endTime: Date? = nil, + locale: LocaleCode? = nil, + maxResults: Int? = nil, + nextToken: String? = nil, + organizationalUnitId: String? = nil, + relatedAccount: String? = nil, + source: String? = nil, + startTime: Date? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListManagedNotificationEventsResponse { + let input = ListManagedNotificationEventsRequest( + endTime: endTime, + locale: locale, + maxResults: maxResults, + nextToken: nextToken, + organizationalUnitId: organizationalUnitId, + relatedAccount: relatedAccount, + source: source, + startTime: startTime + ) + return try await self.listManagedNotificationEvents(input, logger: logger) + } + /// Returns a list of abbreviated NotificationConfigurations according to specified filters, in reverse chronological order (newest first). @Sendable @inlinable @@ -573,7 +1046,7 @@ public struct Notifications: AWSService { /// /// Parameters: /// - channelArn: The Amazon Resource Name (ARN) of the Channel to match. - /// - eventRuleSource: The matched event source. Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// - eventRuleSource: The matched event source. Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. /// - maxResults: The maximum number of results to be returned in this call. Defaults to 20. /// - nextToken: The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding. /// - status: The NotificationConfiguration status to match. Values: ACTIVE All EventRules are ACTIVE and any call can be run. PARTIALLY_ACTIVE Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. INACTIVE All EventRules are INACTIVE and any call can be run. DELETING This NotificationConfiguration is being deleted. Only GET and LIST calls can be run. @@ -599,7 +1072,7 @@ public struct Notifications: AWSService { /// Returns a list of NotificationEvents according to specified filters, in reverse chronological order (newest first). User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. ListNotificationEvents only returns notifications stored in the same Region in which the action is called. /// User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. - /// For more information, see Notification hubs in the AWS User Notifications User Guide. + /// For more information, see Notification hubs in the Amazon Web Services User Notifications User Guide.
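// ---------------------------------------------------------------------------
// Editor's aside, not part of the generated diff: ListNotificationEvents (and
// GetNotificationEvent) only see events stored in the Region the call is made
// in, so point the service client at your oldest registered notification hub.
// A minimal sketch; the hub Region, event source, and time window below are
// illustrative assumptions.
//
//     import Foundation
//     import SotoNotifications
//
//     func recentCloudWatchEvents(client: AWSClient) async throws {
//         // Assume us-east-1 is the oldest registered notification hub.
//         let notifications = Notifications(client: client, region: .useast1)
//         for try await page in notifications.listNotificationEventsPaginator(
//             source: "aws.cloudwatch",
//             startTime: Date(timeIntervalSinceNow: -86_400) // last 24 hours
//         ) {
//             print(page)
//         }
//     }
// ---------------------------------------------------------------------------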
@Sendable @inlinable public func listNotificationEvents(_ input: ListNotificationEventsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListNotificationEventsResponse { @@ -614,7 +1087,7 @@ public struct Notifications: AWSService { } /// Returns a list of NotificationEvents according to specified filters, in reverse chronological order (newest first). User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. ListNotificationEvents only returns notifications stored in the same Region in which the action is called. /// User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. - /// For more information, see Notification hubs in the AWS User Notifications User Guide. + /// For more information, see Notification hubs in the Amazon Web Services User Notifications User Guide. /// /// Parameters: /// - aggregateNotificationEventArn: The Amazon Resource Name (ARN) of the aggregatedNotificationEventArn to match. @@ -623,7 +1096,7 @@ public struct Notifications: AWSService { /// - locale: The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US). /// - maxResults: The maximum number of results to be returned in this call. Defaults to 20. /// - nextToken: The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding. - /// - source: The matched event source. Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// - source: The matched event source. Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. /// - startTime: The earliest time of events to return from this call. /// - logger: Logger use during operation @inlinable @@ -683,7 +1156,7 @@ public struct Notifications: AWSService { return try await self.listNotificationHubs(input, logger: logger) } - /// Returns a list of tags for a specified Amazon Resource Name (ARN). For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide. This is only supported for NotificationConfigurations. + /// Returns a list of tags for a specified Amazon Resource Name (ARN). For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide. This is only supported for NotificationConfigurations. @Sendable @inlinable public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { @@ -696,7 +1169,7 @@ public struct Notifications: AWSService { logger: logger ) } - /// Returns a list of tags for a specified Amazon Resource Name (ARN). For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide. This is only supported for NotificationConfigurations. + /// Returns a list of tags for a specified Amazon Resource Name (ARN). 
For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide. This is only supported for NotificationConfigurations. /// /// Parameters: /// - arn: The Amazon Resource Name (ARN) to use to list tags. @@ -712,7 +1185,7 @@ public struct Notifications: AWSService { return try await self.listTagsForResource(input, logger: logger) } - /// Registers a NotificationHub in the specified Region. There is a maximum of one NotificationHub per Region. You can have a maximum of 3 NotificationHubs at a time. + /// Registers a NotificationConfiguration in the specified Region. There is a maximum of one NotificationConfiguration per Region. You can have a maximum of 3 NotificationHub resources at a time. @Sendable @inlinable public func registerNotificationHub(_ input: RegisterNotificationHubRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterNotificationHubResponse { @@ -725,7 +1198,7 @@ public struct Notifications: AWSService { logger: logger ) } - /// Registers a NotificationHub in the specified Region. There is a maximum of one NotificationHub per Region. You can have a maximum of 3 NotificationHubs at a time. + /// Registers a NotificationConfiguration in the specified Region. There is a maximum of one NotificationConfiguration per Region. You can have a maximum of 3 NotificationHub resources at a time. /// /// Parameters: /// - notificationHubRegion: The Region of the NotificationHub. @@ -741,7 +1214,7 @@ public struct Notifications: AWSService { return try await self.registerNotificationHub(input, logger: logger) } - /// Tags the resource with a tag key and value. For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide. This is only supported for NotificationConfigurations. + /// Tags the resource with a tag key and value. For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide. This is only supported for NotificationConfigurations. @Sendable @inlinable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { @@ -754,7 +1227,7 @@ public struct Notifications: AWSService { logger: logger ) } - /// Tags the resource with a tag key and value. For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide. This is only supported for NotificationConfigurations. + /// Tags the resource with a tag key and value. For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide. This is only supported for NotificationConfigurations. /// /// Parameters: /// - arn: The Amazon Resource Name (ARN) to use to tag a resource. @@ -773,7 +1246,7 @@ public struct Notifications: AWSService { return try await self.tagResource(input, logger: logger) } - /// Untags a resource with a specified Amazon Resource Name (ARN). For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide. + /// Untags a resource with a specified Amazon Resource Name (ARN). For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide. 
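// ---------------------------------------------------------------------------
// Editor's aside, not part of the generated diff: tagging is only supported
// for NotificationConfigurations. A minimal sketch of the tag/untag round
// trip, assuming the generated convenience methods expose `tags:` and
// `tagKeys:` as in the service model; the ARN is supplied by the caller.
//
//     func retagConfiguration(notifications: Notifications, configurationArn: String) async throws {
//         _ = try await notifications.tagResource(
//             arn: configurationArn,
//             tags: ["team": "platform"]
//         )
//         _ = try await notifications.untagResource(
//             arn: configurationArn,
//             tagKeys: ["obsolete-key"]
//         )
//     }
// ---------------------------------------------------------------------------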
@Sendable @inlinable public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { @@ -786,7 +1259,7 @@ public struct Notifications: AWSService { logger: logger ) } - /// Untags a resource with a specified Amazon Resource Name (ARN). For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide. + /// Untags a resource with a specified Amazon Resource Name (ARN). For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide. /// /// Parameters: /// - arn: The Amazon Resource Name (ARN) to use to untag a resource. @@ -823,7 +1296,7 @@ public struct Notifications: AWSService { /// Parameters: /// - arn: The Amazon Resource Name (ARN) to use to update the EventRule. /// - eventPattern: An additional event pattern used to further filter the events this EventRule receives. For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide. - /// - regions: A list of AWS Regions that sends events to this EventRule. + /// - regions: A list of Amazon Web Services Regions that send events to this EventRule. /// - logger: Logger use during operation @inlinable public func updateEventRule( @@ -856,7 +1329,7 @@ public struct Notifications: AWSService { /// Updates a NotificationConfiguration. /// /// Parameters: - /// - aggregationDuration: The status of this NotificationConfiguration. The status should always be INACTIVE when part of the CreateNotificationConfiguration response. Values: ACTIVE All EventRules are ACTIVE and any call can be run. PARTIALLY_ACTIVE Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. Any call can be run. INACTIVE All EventRules are INACTIVE and any call can be run. DELETING This NotificationConfiguration is being deleted. Only GET and LIST calls can be run. + /// - aggregationDuration: The aggregation preference of the NotificationConfiguration. Values: LONG Aggregate notifications for long periods of time (12 hours). SHORT Aggregate notifications for short periods of time (5 minutes). NONE Don't aggregate notifications. /// - arn: The Amazon Resource Name (ARN) used to update the NotificationConfiguration. /// - description: The description of the NotificationConfiguration. /// - name: The name of the NotificationConfiguration. @@ -966,6 +1439,184 @@ extension Notifications { return self.listEventRulesPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listManagedNotificationChannelAssociations(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listManagedNotificationChannelAssociationsPaginator( + _ input: ListManagedNotificationChannelAssociationsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListManagedNotificationChannelAssociationsRequest, ListManagedNotificationChannelAssociationsResponse> { + return .init( + input: input, + command: self.listManagedNotificationChannelAssociations, + inputKey: \ListManagedNotificationChannelAssociationsRequest.nextToken, + outputKey: \ListManagedNotificationChannelAssociationsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listManagedNotificationChannelAssociations(_:logger:)``. + /// + /// - Parameters: + /// - managedNotificationConfigurationArn: The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to match. + /// - maxResults: The maximum number of results to be returned in this call.
Defaults to 20. + /// - logger: Logger used for logging + @inlinable + public func listManagedNotificationChannelAssociationsPaginator( + managedNotificationConfigurationArn: String, + maxResults: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListManagedNotificationChannelAssociationsRequest, ListManagedNotificationChannelAssociationsResponse> { + let input = ListManagedNotificationChannelAssociationsRequest( + managedNotificationConfigurationArn: managedNotificationConfigurationArn, + maxResults: maxResults + ) + return self.listManagedNotificationChannelAssociationsPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``listManagedNotificationChildEvents(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listManagedNotificationChildEventsPaginator( + _ input: ListManagedNotificationChildEventsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListManagedNotificationChildEventsRequest, ListManagedNotificationChildEventsResponse> { + return .init( + input: input, + command: self.listManagedNotificationChildEvents, + inputKey: \ListManagedNotificationChildEventsRequest.nextToken, + outputKey: \ListManagedNotificationChildEventsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listManagedNotificationChildEvents(_:logger:)``. + /// + /// - Parameters: + /// - aggregateManagedNotificationEventArn: The Amazon Resource Name (ARN) of the ManagedNotificationEvent. + /// - endTime: Latest time of events to return from this call. + /// - locale: The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US). + /// - maxResults: The maximum number of results to be returned in this call. Defaults to 20. + /// - organizationalUnitId: The identifier of the Amazon Web Services Organizations organizational unit (OU) associated with the Managed Notification Child Events. + /// - relatedAccount: The Amazon Web Services account ID associated with the Managed Notification Child Events. + /// - startTime: The earliest time of events to return from this call. + /// - logger: Logger used for logging + @inlinable + public func listManagedNotificationChildEventsPaginator( + aggregateManagedNotificationEventArn: String, + endTime: Date? = nil, + locale: LocaleCode? = nil, + maxResults: Int? = nil, + organizationalUnitId: String? = nil, + relatedAccount: String? = nil, + startTime: Date? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListManagedNotificationChildEventsRequest, ListManagedNotificationChildEventsResponse> { + let input = ListManagedNotificationChildEventsRequest( + aggregateManagedNotificationEventArn: aggregateManagedNotificationEventArn, + endTime: endTime, + locale: locale, + maxResults: maxResults, + organizationalUnitId: organizationalUnitId, + relatedAccount: relatedAccount, + startTime: startTime + ) + return self.listManagedNotificationChildEventsPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``listManagedNotificationConfigurations(_:logger:)``.
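// ---------------------------------------------------------------------------
// Editor's aside, not part of the generated diff: each paginator in this file
// returns an AWSClient.PaginatorSequence, an AsyncSequence that re-issues the
// request with the previous response's nextToken (the inputKey/outputKey pair
// above) until the token is exhausted. Summing a field across pages is then
// just a loop; the field name below follows the service model, and the client
// is assumed to be configured elsewhere.
//
//     func countManagedConfigurations(notifications: Notifications) async throws -> Int {
//         var total = 0
//         for try await page in notifications.listManagedNotificationConfigurationsPaginator(maxResults: 50) {
//             total += page.managedNotificationConfigurations.count
//         }
//         return total
//     }
// ---------------------------------------------------------------------------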
+ /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listManagedNotificationConfigurationsPaginator( + _ input: ListManagedNotificationConfigurationsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListManagedNotificationConfigurationsRequest, ListManagedNotificationConfigurationsResponse> { + return .init( + input: input, + command: self.listManagedNotificationConfigurations, + inputKey: \ListManagedNotificationConfigurationsRequest.nextToken, + outputKey: \ListManagedNotificationConfigurationsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listManagedNotificationConfigurations(_:logger:)``. + /// + /// - Parameters: + /// - channelIdentifier: The identifier or ARN of the notification channel to filter configurations by. + /// - maxResults: The maximum number of results to be returned in this call. Defaults to 20. + /// - logger: Logger used for logging + @inlinable + public func listManagedNotificationConfigurationsPaginator( + channelIdentifier: String? = nil, + maxResults: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListManagedNotificationConfigurationsRequest, ListManagedNotificationConfigurationsResponse> { + let input = ListManagedNotificationConfigurationsRequest( + channelIdentifier: channelIdentifier, + maxResults: maxResults + ) + return self.listManagedNotificationConfigurationsPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``listManagedNotificationEvents(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listManagedNotificationEventsPaginator( + _ input: ListManagedNotificationEventsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListManagedNotificationEventsRequest, ListManagedNotificationEventsResponse> { + return .init( + input: input, + command: self.listManagedNotificationEvents, + inputKey: \ListManagedNotificationEventsRequest.nextToken, + outputKey: \ListManagedNotificationEventsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listManagedNotificationEvents(_:logger:)``. + /// + /// - Parameters: + /// - endTime: Latest time of events to return from this call. + /// - locale: The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US). + /// - maxResults: The maximum number of results to be returned in this call. Defaults to 20. + /// - organizationalUnitId: The Organizational Unit Id that an Amazon Web Services account belongs to. + /// - relatedAccount: The Amazon Web Services account ID associated with the Managed Notification Events. + /// - source: The Amazon Web Services service the event originates from. For example, aws.cloudwatch. + /// - startTime: The earliest time of events to return from this call. + /// - logger: Logger used for logging + @inlinable + public func listManagedNotificationEventsPaginator( + endTime: Date? = nil, + locale: LocaleCode? = nil, + maxResults: Int? = nil, + organizationalUnitId: String? = nil, + relatedAccount: String? = nil, + source: String? = nil, + startTime: Date?
= nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListManagedNotificationEventsRequest, ListManagedNotificationEventsResponse> { + let input = ListManagedNotificationEventsRequest( + endTime: endTime, + locale: locale, + maxResults: maxResults, + organizationalUnitId: organizationalUnitId, + relatedAccount: relatedAccount, + source: source, + startTime: startTime + ) + return self.listManagedNotificationEventsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listNotificationConfigurations(_:logger:)``. /// /// - Parameters: @@ -988,7 +1639,7 @@ extension Notifications { /// /// - Parameters: /// - channelArn: The Amazon Resource Name (ARN) of the Channel to match. - /// - eventRuleSource: The matched event source. Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// - eventRuleSource: The matched event source. Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. /// - maxResults: The maximum number of results to be returned in this call. Defaults to 20. /// - status: The NotificationConfiguration status to match. Values: ACTIVE All EventRules are ACTIVE and any call can be run. PARTIALLY_ACTIVE Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. INACTIVE All EventRules are INACTIVE and any call can be run. DELETING This NotificationConfiguration is being deleted. Only GET and LIST calls can be run. /// - logger: Logger used for logging @@ -1035,7 +1686,7 @@ extension Notifications { /// - includeChildEvents: Include aggregated child events in the result. /// - locale: The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US). /// - maxResults: The maximum number of results to be returned in this call. Defaults to 20. - /// - source: The matched event source. Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// - source: The matched event source. Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. /// - startTime: The earliest time of events to return from this call.
/// - logger: Logger used for logging @inlinable @@ -1118,6 +1769,60 @@ extension Notifications.ListEventRulesRequest: AWSPaginateToken { } } +extension Notifications.ListManagedNotificationChannelAssociationsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Notifications.ListManagedNotificationChannelAssociationsRequest { + return .init( + managedNotificationConfigurationArn: self.managedNotificationConfigurationArn, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension Notifications.ListManagedNotificationChildEventsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Notifications.ListManagedNotificationChildEventsRequest { + return .init( + aggregateManagedNotificationEventArn: self.aggregateManagedNotificationEventArn, + endTime: self.endTime, + locale: self.locale, + maxResults: self.maxResults, + nextToken: token, + organizationalUnitId: self.organizationalUnitId, + relatedAccount: self.relatedAccount, + startTime: self.startTime + ) + } +} + +extension Notifications.ListManagedNotificationConfigurationsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Notifications.ListManagedNotificationConfigurationsRequest { + return .init( + channelIdentifier: self.channelIdentifier, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension Notifications.ListManagedNotificationEventsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Notifications.ListManagedNotificationEventsRequest { + return .init( + endTime: self.endTime, + locale: self.locale, + maxResults: self.maxResults, + nextToken: token, + organizationalUnitId: self.organizationalUnitId, + relatedAccount: self.relatedAccount, + source: self.source, + startTime: self.startTime + ) + } +} + extension Notifications.ListNotificationConfigurationsRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> Notifications.ListNotificationConfigurationsRequest { diff --git a/Sources/Soto/Services/Notifications/Notifications_shapes.swift b/Sources/Soto/Services/Notifications/Notifications_shapes.swift index 4818d05087..b5392f6cbd 100644 --- a/Sources/Soto/Services/Notifications/Notifications_shapes.swift +++ b/Sources/Soto/Services/Notifications/Notifications_shapes.swift @@ -26,6 +26,25 @@ import Foundation extension Notifications { // MARK: Enums + public enum AccessStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + case pending = "PENDING" + public var description: String { return self.rawValue } + } + + public enum AccountContactType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + /// Alternate Billing Contact managed by AWS Account Management Service. + case accountAlternateBilling = "ACCOUNT_ALTERNATE_BILLING" + /// Alternate Operations Contact managed by AWS Account Management Service. + case accountAlternateOperations = "ACCOUNT_ALTERNATE_OPERATIONS" + /// Alternate Security Contact managed by AWS Account Management Service. + case accountAlternateSecurity = "ACCOUNT_ALTERNATE_SECURITY" + /// Primary Contact managed by AWS Account Management Service. 
+ case accountPrimary = "ACCOUNT_PRIMARY" + public var description: String { return self.rawValue } + } + public enum AggregationDuration: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { /// Aggregate notifications for long periods of time (12 hours) case long = "LONG" @@ -43,6 +62,26 @@ extension Notifications { public var description: String { return self.rawValue } } + public enum ChannelAssociationOverrideOption: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + /// AWS User Notification service users cannot associate or disassociate a Channel with a notification configuration. + case disabled = "DISABLED" + /// AWS User Notification service users can associate or disassociate a Channel with a notification configuration. + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + + public enum ChannelType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + /// User Notification Service sends notifications to Account Managed contacts. + case accountContact = "ACCOUNT_CONTACT" + /// Chatbot sends notifications to group platforms, like Slack or Chime. Link: https://aws.amazon.com/chatbot/ + case chatbot = "CHATBOT" + /// Email sends notifications to email addresses. + case email = "EMAIL" + /// AWS Console Mobile App sends notifications to mobile devices. Link: https://aws.amazon.com/console/mobile/ + case mobile = "MOBILE" + public var description: String { return self.rawValue } + } + public enum EventRuleStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { /// EventRule is processing events. Any call can be executed. case active = "ACTIVE" @@ -148,8 +187,74 @@ extension Notifications { // MARK: Shapes + public struct AggregationDetail: AWSDecodableShape { + /// Properties used to summarize aggregated events. + public let summarizationDimensions: [SummarizationDimensionDetail]? + + @inlinable + public init(summarizationDimensions: [SummarizationDimensionDetail]? = nil) { + self.summarizationDimensions = summarizationDimensions + } + + private enum CodingKeys: String, CodingKey { + case summarizationDimensions = "summarizationDimensions" + } + } + + public struct AggregationKey: AWSDecodableShape { + /// Indicates the type of aggregation key. + public let name: String + /// Indicates the value associated with the aggregation key name. + public let value: String + + @inlinable + public init(name: String, value: String) { + self.name = name + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case value = "value" + } + } + + public struct AggregationSummary: AWSDecodableShape { + /// List of additional dimensions used to group and summarize data. + public let additionalSummarizationDimensions: [SummarizationDimensionOverview]? + /// Indicates the Amazon Web Services accounts in the aggregation key. + public let aggregatedAccounts: SummarizationDimensionOverview + /// Indicates the criteria or rules by which notifications have been grouped together. + public let aggregatedBy: [AggregationKey] + /// Indicates the collection of organizational units that are involved in the aggregation key. + public let aggregatedOrganizationalUnits: SummarizationDimensionOverview? + /// Indicates the Amazon Web Services Regions in the aggregation key. + public let aggregatedRegions: SummarizationDimensionOverview + /// Indicates the number of events associated with the aggregation key.
+ public let eventCount: Int + + @inlinable + public init(additionalSummarizationDimensions: [SummarizationDimensionOverview]? = nil, aggregatedAccounts: SummarizationDimensionOverview, aggregatedBy: [AggregationKey], aggregatedOrganizationalUnits: SummarizationDimensionOverview? = nil, aggregatedRegions: SummarizationDimensionOverview, eventCount: Int) { + self.additionalSummarizationDimensions = additionalSummarizationDimensions + self.aggregatedAccounts = aggregatedAccounts + self.aggregatedBy = aggregatedBy + self.aggregatedOrganizationalUnits = aggregatedOrganizationalUnits + self.aggregatedRegions = aggregatedRegions + self.eventCount = eventCount + } + + private enum CodingKeys: String, CodingKey { + case additionalSummarizationDimensions = "additionalSummarizationDimensions" + case aggregatedAccounts = "aggregatedAccounts" + case aggregatedBy = "aggregatedBy" + case aggregatedOrganizationalUnits = "aggregatedOrganizationalUnits" + case aggregatedRegions = "aggregatedRegions" + case eventCount = "eventCount" + } + } + public struct AssociateChannelRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) of the Channel to associate with the NotificationConfiguration. Supported ARNs include AWS Chatbot, the Console Mobile Application, and notifications-contacts. + /// The Amazon Resource Name (ARN) of the Channel to associate with the NotificationConfiguration. Supported ARNs include Chatbot, the Console Mobile Application, and notifications-contacts. public let arn: String /// The ARN of the NotificationConfiguration to associate with the Channel. public let notificationConfigurationArn: String @@ -181,16 +286,81 @@ extension Notifications { public init() {} } + public struct AssociateManagedNotificationAccountContactRequest: AWSEncodableShape { + /// A unique value of an Account Contact Type to associate with the ManagedNotificationConfiguration. + public let contactIdentifier: AccountContactType + /// The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to associate with the Account Contact. + public let managedNotificationConfigurationArn: String + + @inlinable + public init(contactIdentifier: AccountContactType, managedNotificationConfigurationArn: String) { + self.contactIdentifier = contactIdentifier + self.managedNotificationConfigurationArn = managedNotificationConfigurationArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.contactIdentifier, key: "contactIdentifier") + try container.encode(self.managedNotificationConfigurationArn, forKey: .managedNotificationConfigurationArn) + } + + public func validate(name: String) throws { + try self.validate(self.managedNotificationConfigurationArn, name: "managedNotificationConfigurationArn", parent: name, pattern: "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}$") + } + + private enum CodingKeys: String, CodingKey { + case managedNotificationConfigurationArn = "managedNotificationConfigurationArn" + } + } + + public struct AssociateManagedNotificationAccountContactResponse: AWSDecodableShape { + public init() {} + } + + public struct AssociateManagedNotificationAdditionalChannelRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the Channel to associate with the ManagedNotificationConfiguration. 
Supported ARNs include Chatbot, the Console Mobile Application, and email (notifications-contacts). + public let channelArn: String + /// The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to associate with the additional Channel. + public let managedNotificationConfigurationArn: String + + @inlinable + public init(channelArn: String, managedNotificationConfigurationArn: String) { + self.channelArn = channelArn + self.managedNotificationConfigurationArn = managedNotificationConfigurationArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.channelArn, key: "channelArn") + try container.encode(self.managedNotificationConfigurationArn, forKey: .managedNotificationConfigurationArn) + } + + public func validate(name: String) throws { + try self.validate(self.channelArn, name: "channelArn", parent: name, pattern: "^arn:aws:(chatbot|consoleapp|notifications-contacts):[a-zA-Z0-9-]*:[0-9]{12}:[a-zA-Z0-9-_.@]+/[a-zA-Z0-9/_.@:-]+$") + try self.validate(self.managedNotificationConfigurationArn, name: "managedNotificationConfigurationArn", parent: name, pattern: "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}$") + } + + private enum CodingKeys: String, CodingKey { + case managedNotificationConfigurationArn = "managedNotificationConfigurationArn" + } + } + + public struct AssociateManagedNotificationAdditionalChannelResponse: AWSDecodableShape { + public init() {} + } + public struct CreateEventRuleRequest: AWSEncodableShape { /// An additional event pattern used to further filter the events this EventRule receives. For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide. public let eventPattern: String? - /// The event type to match. Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// The event type to match. Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and Amazon CloudWatch Alarm State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. public let eventType: String /// The Amazon Resource Name (ARN) of the NotificationConfiguration associated with this EventRule. public let notificationConfigurationArn: String - /// A list of AWS Regions that send events to this EventRule. + /// A list of Amazon Web Services Regions that send events to this EventRule. public let regions: [String] - /// The matched event source. Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// The matched event source. Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. 
public let source: String @inlinable @@ -251,7 +421,7 @@ extension Notifications { } public struct CreateNotificationConfigurationRequest: AWSEncodableShape { - /// The aggregation preference of the NotificationConfiguration. Values: LONG Aggregate notifications for long periods of time (12 hours). SHORT Aggregate notifications for short periods of time (5 minutes). NONE Don't aggregate notifications. No delay in delivery. + /// The aggregation preference of the NotificationConfiguration. Values: LONG Aggregate notifications for long periods of time (12 hours). SHORT Aggregate notifications for short periods of time (5 minutes). NONE Don't aggregate notifications. public let aggregationDuration: AggregationDuration? /// The description of the NotificationConfiguration. public let description: String @@ -290,9 +460,9 @@ extension Notifications { } public struct CreateNotificationConfigurationResponse: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the the resource. + /// The Amazon Resource Name (ARN) of the NotificationConfiguration. public let arn: String - /// The status of this NotificationConfiguration. The status should always be INACTIVE when part of the CreateNotificationConfiguration response. Values: ACTIVE All EventRules are ACTIVE and any call can be run. PARTIALLY_ACTIVE Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. INACTIVE All EventRules are INACTIVE and any call can be run. DELETING This NotificationConfiguration is being deleted. Only GET and LIST calls can be run. + /// The current status of this NotificationConfiguration. public let status: NotificationConfigurationStatus @inlinable @@ -360,7 +530,7 @@ extension Notifications { } public struct DeregisterNotificationHubRequest: AWSEncodableShape { - /// The NotificationHub Region. + /// The NotificationConfiguration Region. public let notificationHubRegion: String @inlinable @@ -384,9 +554,9 @@ extension Notifications { } public struct DeregisterNotificationHubResponse: AWSDecodableShape { - /// The NotificationHub Region. + /// The NotificationConfiguration Region. public let notificationHubRegion: String - /// NotificationHub status information. + /// NotificationConfiguration status information. public let statusSummary: NotificationHubStatusSummary @inlinable @@ -419,6 +589,14 @@ extension Notifications { } } + public struct DisableNotificationsAccessForOrganizationRequest: AWSEncodableShape { + public init() {} + } + + public struct DisableNotificationsAccessForOrganizationResponse: AWSDecodableShape { + public init() {} + } + public struct DisassociateChannelRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the Channel to disassociate. public let arn: String @@ -452,6 +630,79 @@ extension Notifications { public init() {} } + public struct DisassociateManagedNotificationAccountContactRequest: AWSEncodableShape { + /// The unique value of an Account Contact Type to associate with the ManagedNotificationConfiguration. + public let contactIdentifier: AccountContactType + /// The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to associate with the Account Contact. 
+ public let managedNotificationConfigurationArn: String + + @inlinable + public init(contactIdentifier: AccountContactType, managedNotificationConfigurationArn: String) { + self.contactIdentifier = contactIdentifier + self.managedNotificationConfigurationArn = managedNotificationConfigurationArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.contactIdentifier, key: "contactIdentifier") + try container.encode(self.managedNotificationConfigurationArn, forKey: .managedNotificationConfigurationArn) + } + + public func validate(name: String) throws { + try self.validate(self.managedNotificationConfigurationArn, name: "managedNotificationConfigurationArn", parent: name, pattern: "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}$") + } + + private enum CodingKeys: String, CodingKey { + case managedNotificationConfigurationArn = "managedNotificationConfigurationArn" + } + } + + public struct DisassociateManagedNotificationAccountContactResponse: AWSDecodableShape { + public init() {} + } + + public struct DisassociateManagedNotificationAdditionalChannelRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the Channel to associate with the ManagedNotificationConfiguration. + public let channelArn: String + /// The Amazon Resource Name (ARN) of the Managed Notification Configuration to associate with the additional Channel. + public let managedNotificationConfigurationArn: String + + @inlinable + public init(channelArn: String, managedNotificationConfigurationArn: String) { + self.channelArn = channelArn + self.managedNotificationConfigurationArn = managedNotificationConfigurationArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.channelArn, key: "channelArn") + try container.encode(self.managedNotificationConfigurationArn, forKey: .managedNotificationConfigurationArn) + } + + public func validate(name: String) throws { + try self.validate(self.channelArn, name: "channelArn", parent: name, pattern: "^arn:aws:(chatbot|consoleapp|notifications-contacts):[a-zA-Z0-9-]*:[0-9]{12}:[a-zA-Z0-9-_.@]+/[a-zA-Z0-9/_.@:-]+$") + try self.validate(self.managedNotificationConfigurationArn, name: "managedNotificationConfigurationArn", parent: name, pattern: "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}$") + } + + private enum CodingKeys: String, CodingKey { + case managedNotificationConfigurationArn = "managedNotificationConfigurationArn" + } + } + + public struct DisassociateManagedNotificationAdditionalChannelResponse: AWSDecodableShape { + public init() {} + } + + public struct EnableNotificationsAccessForOrganizationRequest: AWSEncodableShape { + public init() {} + } + + public struct EnableNotificationsAccessForOrganizationResponse: AWSDecodableShape { + public init() {} + } + public struct EventRuleStatusSummary: AWSDecodableShape { /// A human-readable reason for EventRuleStatus. 
public let reason: String @@ -471,22 +722,22 @@ extension Notifications { } public struct EventRuleStructure: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the resource. + /// The Amazon Resource Name (ARN) of the EventRule. CloudFormation stack generates this ARN and then uses this ARN to associate with the NotificationConfiguration. public let arn: String - /// The creation time of the resource. + /// The creation time of the EventRule. @CustomCoding public var creationTime: Date /// An additional event pattern used to further filter the events this EventRule receives. For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide. public let eventPattern: String - /// The event type to match. Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// The event type this rule should match with the EventBridge events. It must match with at least one of the valid EventBridge event types. For example, Amazon EC2 Instance State-change Notification and Amazon CloudWatch State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. public let eventType: String - /// A list of Amazon EventBridge Managed Rule ARNs associated with this EventRule. These are created by AWS User Notifications within your account so your EventRules can function. + /// A list of Amazon EventBridge Managed Rule ARNs associated with this EventRule. These are created by User Notifications within your account so your EventRules can function. public let managedRules: [String] /// The ARN for the NotificationConfiguration associated with this EventRule. public let notificationConfigurationArn: String - /// A list of AWS Regions that send events to this EventRule. + /// A list of Amazon Web Services Regions that send events to this EventRule. public let regions: [String] - /// The matched event source. Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// The event source this rule should match with the EventBridge event sources. It must match with at least one of the valid EventBridge event sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. public let source: String /// A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary. public let statusSummaryByRegion: [String: EventRuleStatusSummary] @@ -547,15 +798,15 @@ extension Notifications { public var creationTime: Date /// An additional event pattern used to further filter the events this EventRule receives. For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide. public let eventPattern: String - /// The event type to match.
Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// The event type to match. Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and Amazon CloudWatch Alarm State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. public let eventType: String - /// A list of managed rules from EventBridge that are are associated with this EventRule. These are created by AWS User Notifications within your account so this EventRule functions. + /// A list of managed rules from EventBridge that are associated with this EventRule. These are created by User Notifications within your account so this EventRule functions. public let managedRules: [String] /// The ARN of a NotificationConfiguration. public let notificationConfigurationArn: String - /// A list of AWS Regions that send events to this EventRule. + /// A list of Amazon Web Services Regions that send events to this EventRule. public let regions: [String] - /// The matched event source. Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// The matched event source. Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. public let source: String /// A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary. public let statusSummaryByRegion: [String: EventRuleStatusSummary] @@ -586,6 +837,164 @@ extension Notifications { } } + public struct GetManagedNotificationChildEventRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the ManagedNotificationChildEvent to return. + public let arn: String + /// The locale code of the language used for the retrieved ManagedNotificationChildEvent. The default locale is English (en_US). + public let locale: LocaleCode? + + @inlinable + public init(arn: String, locale: LocaleCode? = nil) { + self.arn = arn + self.locale = locale + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.arn, key: "arn") + request.encodeQuery(self.locale, key: "locale") + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}/event/[a-z0-9]{27}/child-event/[a-z0-9]{27}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetManagedNotificationChildEventResponse: AWSDecodableShape { + /// The ARN of the resource. + public let arn: String + /// The content of the ManagedNotificationChildEvent. + public let content: ManagedNotificationChildEvent + /// The creation time of the ManagedNotificationChildEvent. + @CustomCoding + public var creationTime: Date + /// The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration associated with the ManagedNotificationChildEvent.
+ public let managedNotificationConfigurationArn: String + + @inlinable + public init(arn: String, content: ManagedNotificationChildEvent, creationTime: Date, managedNotificationConfigurationArn: String) { + self.arn = arn + self.content = content + self.creationTime = creationTime + self.managedNotificationConfigurationArn = managedNotificationConfigurationArn + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case content = "content" + case creationTime = "creationTime" + case managedNotificationConfigurationArn = "managedNotificationConfigurationArn" + } + } + + public struct GetManagedNotificationConfigurationRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to return. + public let arn: String + + @inlinable + public init(arn: String) { + self.arn = arn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.arn, key: "arn") + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetManagedNotificationConfigurationResponse: AWSDecodableShape { + /// The ARN of the ManagedNotificationConfiguration resource. + public let arn: String + /// The category of the ManagedNotificationConfiguration. + public let category: String + /// The description of the ManagedNotificationConfiguration. + public let description: String + /// The name of the ManagedNotificationConfiguration. + public let name: String + /// The subCategory of the ManagedNotificationConfiguration. + public let subCategory: String + + @inlinable + public init(arn: String, category: String, description: String, name: String, subCategory: String) { + self.arn = arn + self.category = category + self.description = description + self.name = name + self.subCategory = subCategory + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case category = "category" + case description = "description" + case name = "name" + case subCategory = "subCategory" + } + } + + public struct GetManagedNotificationEventRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the ManagedNotificationEvent to return. + public let arn: String + /// The locale code of the language used for the retrieved ManagedNotificationEvent. The default locale is English (en_US). + public let locale: LocaleCode? + + @inlinable + public init(arn: String, locale: LocaleCode? = nil) { + self.arn = arn + self.locale = locale + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.arn, key: "arn") + request.encodeQuery(self.locale, key: "locale") + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}/event/[a-z0-9]{27}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetManagedNotificationEventResponse: AWSDecodableShape { + /// The ARN of the resource. 
+ public let arn: String + /// The content of the ManagedNotificationEvent. + public let content: ManagedNotificationEvent + /// The creation time of the ManagedNotificationEvent. + @CustomCoding + public var creationTime: Date + /// The ARN of the ManagedNotificationConfiguration. + public let managedNotificationConfigurationArn: String + + @inlinable + public init(arn: String, content: ManagedNotificationEvent, creationTime: Date, managedNotificationConfigurationArn: String) { + self.arn = arn + self.content = content + self.creationTime = creationTime + self.managedNotificationConfigurationArn = managedNotificationConfigurationArn + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case content = "content" + case creationTime = "creationTime" + case managedNotificationConfigurationArn = "managedNotificationConfigurationArn" + } + } + public struct GetNotificationConfigurationRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the NotificationConfiguration to return. public let arn: String @@ -609,7 +1018,7 @@ extension Notifications { } public struct GetNotificationConfigurationResponse: AWSDecodableShape { - /// The aggregation preference of the NotificationConfiguration. Values: LONG Aggregate notifications for long periods of time (12 hours). SHORT Aggregate notifications for short periods of time (5 minutes). NONE Don't aggregate notifications. No delay in delivery. + /// The aggregation preference of the NotificationConfiguration. Values: LONG Aggregate notifications for long periods of time (12 hours). SHORT Aggregate notifications for short periods of time (5 minutes). NONE Don't aggregate notifications. public let aggregationDuration: AggregationDuration? /// The ARN of the resource. public let arn: String @@ -620,7 +1029,7 @@ extension Notifications { public let description: String /// The name of the NotificationConfiguration. public let name: String - /// The status of this NotificationConfiguration. The status should always be INACTIVE when part of the CreateNotificationConfiguration response. Values: ACTIVE All EventRules are ACTIVE and any call can be run. PARTIALLY_ACTIVE Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. INACTIVE All EventRules are INACTIVE and any call can be run. DELETING This NotificationConfiguration is being deleted. Only GET and LIST calls can be run. Only GET and LIST calls can be run. + /// The status of this NotificationConfiguration. public let status: NotificationConfigurationStatus @inlinable @@ -696,9 +1105,27 @@ extension Notifications { } } - public struct ListChannelsRequest: AWSEncodableShape { - /// The maximum number of results to be returned in this call. The default value is 20. - public let maxResults: Int? + public struct GetNotificationsAccessForOrganizationRequest: AWSEncodableShape { + public init() {} + } + + public struct GetNotificationsAccessForOrganizationResponse: AWSDecodableShape { + /// The AccessStatus of Service Trust Enablement for User Notifications to Amazon Web Services Organizations. 
+ public let notificationsAccessForOrganization: NotificationsAccessForOrganization + + @inlinable + public init(notificationsAccessForOrganization: NotificationsAccessForOrganization) { + self.notificationsAccessForOrganization = notificationsAccessForOrganization + } + + private enum CodingKeys: String, CodingKey { + case notificationsAccessForOrganization = "notificationsAccessForOrganization" + } + } + + public struct ListChannelsRequest: AWSEncodableShape { + /// The maximum number of results to be returned in this call. The default value is 20. + public let maxResults: Int? /// The start token for paginated calls. Retrieved from the response of a previous ListNotificationEvents call. NextToken uses Base64 encoding. public let nextToken: String? /// The Amazon Resource Name (ARN) of the NotificationConfiguration. @@ -798,10 +1225,260 @@ extension Notifications { } } + public struct ListManagedNotificationChannelAssociationsRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to match. + public let managedNotificationConfigurationArn: String + /// The maximum number of results to be returned in this call. Defaults to 20. + public let maxResults: Int? + /// The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationChannelAssociations call. + public let nextToken: String? + + @inlinable + public init(managedNotificationConfigurationArn: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.managedNotificationConfigurationArn = managedNotificationConfigurationArn + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.managedNotificationConfigurationArn, key: "managedNotificationConfigurationArn") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.managedNotificationConfigurationArn, name: "managedNotificationConfigurationArn", parent: name, pattern: "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 4096) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[\\w+-/=]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListManagedNotificationChannelAssociationsResponse: AWSDecodableShape { + /// A list that contains the following information about a channel association. + public let channelAssociations: [ManagedNotificationChannelAssociationSummary] + /// A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries. + public let nextToken: String? + + @inlinable + public init(channelAssociations: [ManagedNotificationChannelAssociationSummary], nextToken: String? 
= nil) { + self.channelAssociations = channelAssociations + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case channelAssociations = "channelAssociations" + case nextToken = "nextToken" + } + } + + public struct ListManagedNotificationChildEventsRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the ManagedNotificationEvent. + public let aggregateManagedNotificationEventArn: String + /// Latest time of events to return from this call. + public let endTime: Date? + /// The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US). + public let locale: LocaleCode? + /// The maximum number of results to be returned in this call. Defaults to 20. + public let maxResults: Int? + /// The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationChildEvents call. Next token uses Base64 encoding. + public let nextToken: String? + /// The identifier of the Amazon Web Services Organizations organizational unit (OU) associated with the Managed Notification Child Events. + public let organizationalUnitId: String? + /// The Amazon Web Services account ID associated with the Managed Notification Child Events. + public let relatedAccount: String? + /// The earliest time of events to return from this call. + public let startTime: Date? + + @inlinable + public init(aggregateManagedNotificationEventArn: String, endTime: Date? = nil, locale: LocaleCode? = nil, maxResults: Int? = nil, nextToken: String? = nil, organizationalUnitId: String? = nil, relatedAccount: String? = nil, startTime: Date? = nil) { + self.aggregateManagedNotificationEventArn = aggregateManagedNotificationEventArn + self.endTime = endTime + self.locale = locale + self.maxResults = maxResults + self.nextToken = nextToken + self.organizationalUnitId = organizationalUnitId + self.relatedAccount = relatedAccount + self.startTime = startTime + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as!
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aggregateManagedNotificationEventArn, key: "aggregateManagedNotificationEventArn") + request.encodeQuery(self.endTime, key: "endTime") + request.encodeQuery(self.locale, key: "locale") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.organizationalUnitId, key: "organizationalUnitId") + request.encodeQuery(self.relatedAccount, key: "relatedAccount") + request.encodeQuery(self.startTime, key: "startTime") + } + + public func validate(name: String) throws { + try self.validate(self.aggregateManagedNotificationEventArn, name: "aggregateManagedNotificationEventArn", parent: name, pattern: "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}/event/[a-z0-9]{27}$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 4096) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[\\w+-/=]+$") + try self.validate(self.organizationalUnitId, name: "organizationalUnitId", parent: name, pattern: "^Root|ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$") + try self.validate(self.relatedAccount, name: "relatedAccount", parent: name, pattern: "^\\d{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListManagedNotificationChildEventsResponse: AWSDecodableShape { + /// A list of Managed Notification Child Events matching the request criteria. + public let managedNotificationChildEvents: [ManagedNotificationChildEventOverview] + /// A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries. + public let nextToken: String? + + @inlinable + public init(managedNotificationChildEvents: [ManagedNotificationChildEventOverview], nextToken: String? = nil) { + self.managedNotificationChildEvents = managedNotificationChildEvents + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case managedNotificationChildEvents = "managedNotificationChildEvents" + case nextToken = "nextToken" + } + } + + public struct ListManagedNotificationConfigurationsRequest: AWSEncodableShape { + /// The identifier or ARN of the notification channel to filter configurations by. + public let channelIdentifier: String? + /// The maximum number of results to be returned in this call. Defaults to 20. + public let maxResults: Int? + /// The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationConfigurations call. Next token uses Base64 encoding. + public let nextToken: String? + + @inlinable + public init(channelIdentifier: String? = nil, maxResults: Int? = nil, nextToken: String? = nil) { + self.channelIdentifier = channelIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as!
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.channelIdentifier, key: "channelIdentifier") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.channelIdentifier, name: "channelIdentifier", parent: name, pattern: "^ACCOUNT_PRIMARY|ACCOUNT_ALTERNATE_BILLING|ACCOUNT_ALTERNATE_OPERATIONS|ACCOUNT_ALTERNATE_SECURITY|arn:aws:(chatbot|consoleapp|notifications-contacts):[a-zA-Z0-9-]*:[0-9]{12}:[a-zA-Z0-9-_.@]+/[a-zA-Z0-9/_.@:-]+$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 4096) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[\\w+-/=]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListManagedNotificationConfigurationsResponse: AWSDecodableShape { + /// A list of Managed Notification Configurations matching the request criteria. + public let managedNotificationConfigurations: [ManagedNotificationConfigurationStructure] + /// A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries. + public let nextToken: String? + + @inlinable + public init(managedNotificationConfigurations: [ManagedNotificationConfigurationStructure], nextToken: String? = nil) { + self.managedNotificationConfigurations = managedNotificationConfigurations + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case managedNotificationConfigurations = "managedNotificationConfigurations" + case nextToken = "nextToken" + } + } + + public struct ListManagedNotificationEventsRequest: AWSEncodableShape { + /// Latest time of events to return from this call. + public let endTime: Date? + /// The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US). + public let locale: LocaleCode? + /// The maximum number of results to be returned in this call. Defaults to 20. + public let maxResults: Int? + /// The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationEvents call. Next token uses Base64 encoding. + public let nextToken: String? + /// The Organizational Unit Id that an Amazon Web Services account belongs to. + public let organizationalUnitId: String? + /// The Amazon Web Services account ID associated with the Managed Notification Events. + public let relatedAccount: String? + /// The Amazon Web Services service the event originates from. For example aws.cloudwatch. + public let source: String? + /// The earliest time of events to return from this call. + public let startTime: Date? + + @inlinable + public init(endTime: Date? = nil, locale: LocaleCode? = nil, maxResults: Int? = nil, nextToken: String? = nil, organizationalUnitId: String? = nil, relatedAccount: String? = nil, source: String? = nil, startTime: Date? = nil) { + self.endTime = endTime + self.locale = locale + self.maxResults = maxResults + self.nextToken = nextToken + self.organizationalUnitId = organizationalUnitId + self.relatedAccount = relatedAccount + self.source = source + self.startTime = startTime + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as!
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.endTime, key: "endTime") + request.encodeQuery(self.locale, key: "locale") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.organizationalUnitId, key: "organizationalUnitId") + request.encodeQuery(self.relatedAccount, key: "relatedAccount") + request.encodeQuery(self.source, key: "source") + request.encodeQuery(self.startTime, key: "startTime") + } + + public func validate(name: String) throws { + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 4096) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[\\w+-/=]+$") + try self.validate(self.organizationalUnitId, name: "organizationalUnitId", parent: name, pattern: "^Root|ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$") + try self.validate(self.relatedAccount, name: "relatedAccount", parent: name, pattern: "^\\d{12}$") + try self.validate(self.source, name: "source", parent: name, max: 36) + try self.validate(self.source, name: "source", parent: name, min: 1) + try self.validate(self.source, name: "source", parent: name, pattern: "^aws.([a-z0-9\\-])+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListManagedNotificationEventsResponse: AWSDecodableShape { + /// A list of Managed Notification Events matching the request criteria. + public let managedNotificationEvents: [ManagedNotificationEventOverview] + /// A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries. + public let nextToken: String? + + @inlinable + public init(managedNotificationEvents: [ManagedNotificationEventOverview], nextToken: String? = nil) { + self.managedNotificationEvents = managedNotificationEvents + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case managedNotificationEvents = "managedNotificationEvents" + case nextToken = "nextToken" + } + } + public struct ListNotificationConfigurationsRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the Channel to match. public let channelArn: String? - /// The matched event source. Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// The matched event source. Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. public let eventRuleSource: String? /// The maximum number of results to be returned in this call. Defaults to 20. public let maxResults: Int? @@ -873,7 +1550,7 @@ extension Notifications { public let maxResults: Int? /// The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding. public let nextToken: String? - /// The matched event source. Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. 
+ /// The matched event source. Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. public let source: String? /// The earliest time of events to return from this call. public let startTime: Date? @@ -1016,6 +1693,339 @@ extension Notifications { } } + public struct ManagedNotificationChannelAssociationSummary: AWSDecodableShape { + /// The unique identifier for the notification channel. + public let channelIdentifier: String + /// The type of notification channel used for message delivery. Values: ACCOUNT_CONTACT Delivers notifications to Account Managed contacts through the User Notification Service. MOBILE Delivers notifications through the Amazon Web Services Console Mobile Application to mobile devices. CHATBOT Delivers notifications through Chatbot to collaboration platforms (Slack, Chime). EMAIL Delivers notifications to email addresses. + public let channelType: ChannelType + /// Controls whether users can modify channel associations for a notification configuration. Values: ENABLED Users can associate or disassociate channels with the notification configuration. DISABLED Users cannot associate or disassociate channels with the notification configuration. + public let overrideOption: ChannelAssociationOverrideOption? + + @inlinable + public init(channelIdentifier: String, channelType: ChannelType, overrideOption: ChannelAssociationOverrideOption? = nil) { + self.channelIdentifier = channelIdentifier + self.channelType = channelType + self.overrideOption = overrideOption + } + + private enum CodingKeys: String, CodingKey { + case channelIdentifier = "channelIdentifier" + case channelType = "channelType" + case overrideOption = "overrideOption" + } + } + + public struct ManagedNotificationChildEvent: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the ManagedNotificationEvent that is associated with this Managed Notification Child Event. + public let aggregateManagedNotificationEventArn: String + /// Provides detailed information about the dimensions used for event summarization and aggregation. + public let aggregationDetail: AggregationDetail? + /// The end time of the event. + public let endTime: Date? + /// The assessed nature of the event. Values: HEALTHY All EventRules are ACTIVE. UNHEALTHY Some EventRules are ACTIVE and some are INACTIVE. + public let eventStatus: EventStatus? + /// The unique identifier for a Managed Notification Child Event. + public let id: String + public let messageComponents: MessageComponents + /// The type of event causing the notification. Values: ALERT A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached. WARNING A notification about an event where an issue is about to arise. For example, something is approaching a threshold. ANNOUNCEMENT A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated. INFORMATIONAL A notification about informational messages. For example, recommendations, service announcements, or reminders. + public let notificationType: NotificationType + /// The Organizational Unit Id that an Amazon Web Services account belongs to. + public let organizationalUnitId: String? + /// The schema version of the Managed Notification Child Event.
+ public let schemaVersion: SchemaVersion + /// The source event URL. + public let sourceEventDetailUrl: String? + /// Text that needs to be hyperlinked with the sourceEventDetailUrl. For example, the description of the sourceEventDetailUrl. + public let sourceEventDetailUrlDisplayText: String? + /// The notification event start time. + public let startTime: Date? + /// A list of text values. + public let textParts: [String: TextPartValue] + + @inlinable + public init(aggregateManagedNotificationEventArn: String, aggregationDetail: AggregationDetail? = nil, endTime: Date? = nil, eventStatus: EventStatus? = nil, id: String, messageComponents: MessageComponents, notificationType: NotificationType, organizationalUnitId: String? = nil, schemaVersion: SchemaVersion, sourceEventDetailUrl: String? = nil, sourceEventDetailUrlDisplayText: String? = nil, startTime: Date? = nil, textParts: [String: TextPartValue]) { + self.aggregateManagedNotificationEventArn = aggregateManagedNotificationEventArn + self.aggregationDetail = aggregationDetail + self.endTime = endTime + self.eventStatus = eventStatus + self.id = id + self.messageComponents = messageComponents + self.notificationType = notificationType + self.organizationalUnitId = organizationalUnitId + self.schemaVersion = schemaVersion + self.sourceEventDetailUrl = sourceEventDetailUrl + self.sourceEventDetailUrlDisplayText = sourceEventDetailUrlDisplayText + self.startTime = startTime + self.textParts = textParts + } + + private enum CodingKeys: String, CodingKey { + case aggregateManagedNotificationEventArn = "aggregateManagedNotificationEventArn" + case aggregationDetail = "aggregationDetail" + case endTime = "endTime" + case eventStatus = "eventStatus" + case id = "id" + case messageComponents = "messageComponents" + case notificationType = "notificationType" + case organizationalUnitId = "organizationalUnitId" + case schemaVersion = "schemaVersion" + case sourceEventDetailUrl = "sourceEventDetailUrl" + case sourceEventDetailUrlDisplayText = "sourceEventDetailUrlDisplayText" + case startTime = "startTime" + case textParts = "textParts" + } + } + + public struct ManagedNotificationChildEventOverview: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the ManagedNotificationEvent that is associated with this ManagedNotificationChildEvent. + public let aggregateManagedNotificationEventArn: String + /// The Amazon Resource Name (ARN) of the ManagedNotificationChildEvent. + public let arn: String + /// The content of the ManagedNotificationChildEvent. + public let childEvent: ManagedNotificationChildEventSummary + /// The creation time of the ManagedNotificationChildEvent. + @CustomCoding + public var creationTime: Date + /// The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration. + public let managedNotificationConfigurationArn: String + /// The Organizational Unit Id that an Amazon Web Services account belongs to. + public let organizationalUnitId: String? + /// The account related to the ManagedNotificationChildEvent. + public let relatedAccount: String + + @inlinable + public init(aggregateManagedNotificationEventArn: String, arn: String, childEvent: ManagedNotificationChildEventSummary, creationTime: Date, managedNotificationConfigurationArn: String, organizationalUnitId: String?
= nil, relatedAccount: String) { + self.aggregateManagedNotificationEventArn = aggregateManagedNotificationEventArn + self.arn = arn + self.childEvent = childEvent + self.creationTime = creationTime + self.managedNotificationConfigurationArn = managedNotificationConfigurationArn + self.organizationalUnitId = organizationalUnitId + self.relatedAccount = relatedAccount + } + + private enum CodingKeys: String, CodingKey { + case aggregateManagedNotificationEventArn = "aggregateManagedNotificationEventArn" + case arn = "arn" + case childEvent = "childEvent" + case creationTime = "creationTime" + case managedNotificationConfigurationArn = "managedNotificationConfigurationArn" + case organizationalUnitId = "organizationalUnitId" + case relatedAccount = "relatedAccount" + } + } + + public struct ManagedNotificationChildEventSummary: AWSDecodableShape { + /// Provides detailed information about the dimensions used for event summarization and aggregation. + public let aggregationDetail: AggregationDetail + /// The perceived nature of the event. Values: HEALTHY All EventRules are ACTIVE and any call can be run. UNHEALTHY Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. + public let eventStatus: EventStatus + public let messageComponents: MessageComponentsSummary + /// The Type of the event causing this notification. Values: ALERT A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached. WARNING A notification about an event where an issue is about to arise. For example, something is approaching a threshold. ANNOUNCEMENT A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated. INFORMATIONAL A notification about informational messages. For example, recommendations, service announcements, or reminders. + public let notificationType: NotificationType + /// The schema version of the ManagedNotificationChildEvent. + public let schemaVersion: SchemaVersion + /// Contains all event metadata present identically across all NotificationEvents. All fields are present in Source Events via EventBridge. + public let sourceEventMetadata: ManagedSourceEventMetadataSummary + + @inlinable + public init(aggregationDetail: AggregationDetail, eventStatus: EventStatus, messageComponents: MessageComponentsSummary, notificationType: NotificationType, schemaVersion: SchemaVersion, sourceEventMetadata: ManagedSourceEventMetadataSummary) { + self.aggregationDetail = aggregationDetail + self.eventStatus = eventStatus + self.messageComponents = messageComponents + self.notificationType = notificationType + self.schemaVersion = schemaVersion + self.sourceEventMetadata = sourceEventMetadata + } + + private enum CodingKeys: String, CodingKey { + case aggregationDetail = "aggregationDetail" + case eventStatus = "eventStatus" + case messageComponents = "messageComponents" + case notificationType = "notificationType" + case schemaVersion = "schemaVersion" + case sourceEventMetadata = "sourceEventMetadata" + } + } + + public struct ManagedNotificationConfigurationStructure: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration. + public let arn: String + /// The description of the ManagedNotificationConfiguration. + public let description: String + /// The name of the ManagedNotificationConfiguration.
+ public let name: String + + @inlinable + public init(arn: String, description: String, name: String) { + self.arn = arn + self.description = description + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case description = "description" + case name = "name" + } + } + + public struct ManagedNotificationEvent: AWSDecodableShape { + /// The notifications aggregation type. + public let aggregationEventType: AggregationEventType? + public let aggregationSummary: AggregationSummary? + /// The end time of the notification event. + public let endTime: Date? + /// The status of an event. Values: HEALTHY All EventRules are ACTIVE and any call can be run. UNHEALTHY Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. + public let eventStatus: EventStatus? + /// Unique identifier for a ManagedNotificationEvent. + public let id: String + public let messageComponents: MessageComponents + /// The nature of the event causing this notification. Values: ALERT A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached. WARNING A notification about an event where an issue is about to arise. For example, something is approaching a threshold. ANNOUNCEMENT A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated. INFORMATIONAL A notification about informational messages. For example, recommendations, service announcements, or reminders. + public let notificationType: NotificationType + /// The Organizational Unit Id that an Amazon Web Services account belongs to. + public let organizationalUnitId: String? + /// Version of the ManagedNotificationEvent schema. + public let schemaVersion: SchemaVersion + /// URL defined by Source Service to be used by notification consumers to get additional information about the event. + public let sourceEventDetailUrl: String? + /// Text that needs to be hyperlinked with the sourceEventDetailUrl. For example, the description of the sourceEventDetailUrl. + public let sourceEventDetailUrlDisplayText: String? + /// The notification event start time. + public let startTime: Date? + /// A list of text values. + public let textParts: [String: TextPartValue] + + @inlinable + public init(aggregationEventType: AggregationEventType? = nil, aggregationSummary: AggregationSummary? = nil, endTime: Date? = nil, eventStatus: EventStatus? = nil, id: String, messageComponents: MessageComponents, notificationType: NotificationType, organizationalUnitId: String? = nil, schemaVersion: SchemaVersion, sourceEventDetailUrl: String? = nil, sourceEventDetailUrlDisplayText: String? = nil, startTime: Date?
= nil, textParts: [String: TextPartValue]) { + self.aggregationEventType = aggregationEventType + self.aggregationSummary = aggregationSummary + self.endTime = endTime + self.eventStatus = eventStatus + self.id = id + self.messageComponents = messageComponents + self.notificationType = notificationType + self.organizationalUnitId = organizationalUnitId + self.schemaVersion = schemaVersion + self.sourceEventDetailUrl = sourceEventDetailUrl + self.sourceEventDetailUrlDisplayText = sourceEventDetailUrlDisplayText + self.startTime = startTime + self.textParts = textParts + } + + private enum CodingKeys: String, CodingKey { + case aggregationEventType = "aggregationEventType" + case aggregationSummary = "aggregationSummary" + case endTime = "endTime" + case eventStatus = "eventStatus" + case id = "id" + case messageComponents = "messageComponents" + case notificationType = "notificationType" + case organizationalUnitId = "organizationalUnitId" + case schemaVersion = "schemaVersion" + case sourceEventDetailUrl = "sourceEventDetailUrl" + case sourceEventDetailUrlDisplayText = "sourceEventDetailUrlDisplayText" + case startTime = "startTime" + case textParts = "textParts" + } + } + + public struct ManagedNotificationEventOverview: AWSDecodableShape { + /// The list of the regions where the aggregated notifications in this NotificationEvent originated. + public let aggregatedNotificationRegions: [String]? + /// The notifications aggregation type. Values: AGGREGATE The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period. CHILD Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. NONE The notification isn't aggregated. + public let aggregationEventType: AggregationEventType? + public let aggregationSummary: AggregationSummary? + /// The Amazon Resource Name (ARN) of the ManagedNotificationEvent. + public let arn: String + /// The creation time of the ManagedNotificationEvent. + @CustomCoding + public var creationTime: Date + /// The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration. + public let managedNotificationConfigurationArn: String + public let notificationEvent: ManagedNotificationEventSummary + /// The Organizational Unit Id that an Amazon Web Services account belongs to. + public let organizationalUnitId: String? + /// The account related to the ManagedNotificationEvent. + public let relatedAccount: String + + @inlinable + public init(aggregatedNotificationRegions: [String]? = nil, aggregationEventType: AggregationEventType? = nil, aggregationSummary: AggregationSummary? = nil, arn: String, creationTime: Date, managedNotificationConfigurationArn: String, notificationEvent: ManagedNotificationEventSummary, organizationalUnitId: String?
= nil, relatedAccount: String) { + self.aggregatedNotificationRegions = aggregatedNotificationRegions + self.aggregationEventType = aggregationEventType + self.aggregationSummary = aggregationSummary + self.arn = arn + self.creationTime = creationTime + self.managedNotificationConfigurationArn = managedNotificationConfigurationArn + self.notificationEvent = notificationEvent + self.organizationalUnitId = organizationalUnitId + self.relatedAccount = relatedAccount + } + + private enum CodingKeys: String, CodingKey { + case aggregatedNotificationRegions = "aggregatedNotificationRegions" + case aggregationEventType = "aggregationEventType" + case aggregationSummary = "aggregationSummary" + case arn = "arn" + case creationTime = "creationTime" + case managedNotificationConfigurationArn = "managedNotificationConfigurationArn" + case notificationEvent = "notificationEvent" + case organizationalUnitId = "organizationalUnitId" + case relatedAccount = "relatedAccount" + } + } + + public struct ManagedNotificationEventSummary: AWSDecodableShape { + /// The managed notification event status. Values: HEALTHY All EventRules are ACTIVE. UNHEALTHY Some EventRules are ACTIVE and some are INACTIVE. + public let eventStatus: EventStatus + public let messageComponents: MessageComponentsSummary + /// The Type of event causing the notification. Values: ALERT A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached. WARNING A notification about an event where an issue is about to arise. For example, something is approaching a threshold. ANNOUNCEMENT A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated. INFORMATIONAL A notification about informational messages. For example, recommendations, service announcements, or reminders. + public let notificationType: NotificationType + /// The schema version of the ManagedNotificationEvent. + public let schemaVersion: SchemaVersion + /// Contains metadata about the event that caused the ManagedNotificationEvent. + public let sourceEventMetadata: ManagedSourceEventMetadataSummary + + @inlinable + public init(eventStatus: EventStatus, messageComponents: MessageComponentsSummary, notificationType: NotificationType, schemaVersion: SchemaVersion, sourceEventMetadata: ManagedSourceEventMetadataSummary) { + self.eventStatus = eventStatus + self.messageComponents = messageComponents + self.notificationType = notificationType + self.schemaVersion = schemaVersion + self.sourceEventMetadata = sourceEventMetadata + } + + private enum CodingKeys: String, CodingKey { + case eventStatus = "eventStatus" + case messageComponents = "messageComponents" + case notificationType = "notificationType" + case schemaVersion = "schemaVersion" + case sourceEventMetadata = "sourceEventMetadata" + } + } + + public struct ManagedSourceEventMetadataSummary: AWSDecodableShape { + /// The Region where the notification originated. + public let eventOriginRegion: String? + /// The event Type of the notification. + public let eventType: String + /// The source service of the notification. Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. + public let source: String + + @inlinable + public init(eventOriginRegion: String? 
= nil, eventType: String, source: String) { + self.eventOriginRegion = eventOriginRegion + self.eventType = eventType + self.source = source + } + + private enum CodingKeys: String, CodingKey { + case eventOriginRegion = "eventOriginRegion" + case eventType = "eventType" + case source = "source" + } + } + public struct MediaElement: AWSDecodableShape { /// The caption of the media. public let caption: String @@ -1023,7 +2033,7 @@ extension Notifications { public let mediaId: String /// The type of media. public let type: MediaElementType - /// The url of the media. + /// The URL of the media. public let url: String @inlinable @@ -1049,7 +2059,7 @@ extension Notifications { public let dimensions: [Dimension]? /// A sentence long summary. For example, titles or an email subject line. public let headline: String? - /// A paragraph long or multiple sentence summary. For example, AWS Chatbot notifications. + /// A paragraph long or multiple sentence summary. For example, Chatbot notifications. public let paragraphSummary: String? @inlinable @@ -1083,18 +2093,18 @@ extension Notifications { } } public struct NotificationConfigurationStructure: AWSDecodableShape { - /// The aggregation preference of the NotificationConfiguration. Values: LONG Aggregate notifications for long periods of time (12 hours). SHORT Aggregate notifications for short periods of time (5 minutes). NONE Don't aggregate notifications. No delay in delivery. + /// The aggregation preference of the NotificationConfiguration. Values: LONG Aggregate notifications for long periods of time (12 hours). SHORT Aggregate notifications for short periods of time (5 minutes). NONE Don't aggregate notifications. public let aggregationDuration: AggregationDuration? - /// The Amazon Resource Name (ARN) of the resource. + /// The Amazon Resource Name (ARN) of the NotificationConfiguration resource. public let arn: String - /// The creation time of the resource. + /// The creation time of the NotificationConfiguration. @CustomCoding public var creationTime: Date /// The description of the NotificationConfiguration. public let description: String /// The name of the NotificationConfiguration. Supports RFC 3986's unreserved characters. public let name: String - /// The status of this NotificationConfiguration. The status should always be INACTIVE when part of the CreateNotificationConfiguration response. Values: ACTIVE All EventRules are ACTIVE and any call can be run. PARTIALLY_ACTIVE Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. INACTIVE All EventRules are INACTIVE and any call can be run. DELETING This NotificationConfiguration is being deleted. Only GET and LIST calls can be run. Only GET and LIST calls can be run. + /// The current status of the NotificationConfiguration. public let status: NotificationConfigurationStatus @inlinable @@ -1122,6 +2132,8 @@ extension Notifications { public let aggregateNotificationEventArn: String? /// The NotificationConfiguration's aggregation type. Values: AGGREGATE The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period. CHILD Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. NONE The notification isn't aggregated. public let aggregationEventType: AggregationEventType? + /// Provides aggregated summary data for notification events. + public let aggregationSummary: AggregationSummary? /// The Amazon Resource Name (ARN) of the resource.
public let arn: String /// The creation time of the NotificationEvent. @@ -1135,9 +2147,10 @@ extension Notifications { public let relatedAccount: String @inlinable - public init(aggregateNotificationEventArn: String? = nil, aggregationEventType: AggregationEventType? = nil, arn: String, creationTime: Date, notificationConfigurationArn: String, notificationEvent: NotificationEventSummary, relatedAccount: String) { + public init(aggregateNotificationEventArn: String? = nil, aggregationEventType: AggregationEventType? = nil, aggregationSummary: AggregationSummary? = nil, arn: String, creationTime: Date, notificationConfigurationArn: String, notificationEvent: NotificationEventSummary, relatedAccount: String) { self.aggregateNotificationEventArn = aggregateNotificationEventArn self.aggregationEventType = aggregationEventType + self.aggregationSummary = aggregationSummary self.arn = arn self.creationTime = creationTime self.notificationConfigurationArn = notificationConfigurationArn @@ -1148,6 +2161,7 @@ extension Notifications { private enum CodingKeys: String, CodingKey { case aggregateNotificationEventArn = "aggregateNotificationEventArn" case aggregationEventType = "aggregationEventType" + case aggregationSummary = "aggregationSummary" case arn = "arn" case creationTime = "creationTime" case notificationConfigurationArn = "notificationConfigurationArn" @@ -1157,13 +2171,15 @@ extension Notifications { } public struct NotificationEventSchema: AWSDecodableShape { - /// If the value of aggregationEventType is not NONE, this is the Amazon Resource Event (ARN) of the parent aggregate notification. This is omitted if notification isn't aggregated. + /// If the value of aggregationEventType is not NONE, this is the Amazon Resource Event (ARN) of the parent aggregate notification. This is omitted if notification isn't aggregated. public let aggregateNotificationEventArn: String? - /// The NotificationConfiguration's aggregation type. Values: AGGREGATE The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period. CHILD Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. NONE The notification isn't aggregated. + /// The aggregation type of the NotificationConfiguration. Values: AGGREGATE The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period. CHILD Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. NONE The notification isn't aggregated. public let aggregationEventType: AggregationEventType? + /// Provides additional information about how multiple notifications are grouped. + public let aggregationSummary: AggregationSummary? /// The end time of the event. public let endTime: Date? - /// The assesed nature of the event. Values: HEALTHY All EventRules are ACTIVE and any call can be run. UNHEALTHY Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. + /// The assessed nature of the event. Values: HEALTHY All EventRules are ACTIVE and any call can be run. UNHEALTHY Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. public let eventStatus: EventStatus? /// The unique identifier for a NotificationEvent. public let id: String @@ -1186,9 +2202,10 @@ extension Notifications { public let textParts: [String: TextPartValue] @inlinable - public init(aggregateNotificationEventArn: String? = nil, aggregationEventType: AggregationEventType? = nil, endTime: Date? = nil, eventStatus: EventStatus? 
= nil, id: String, media: [MediaElement], messageComponents: MessageComponents, notificationType: NotificationType, schemaVersion: SchemaVersion, sourceEventDetailUrl: String? = nil, sourceEventDetailUrlDisplayText: String? = nil, sourceEventMetadata: SourceEventMetadata, startTime: Date? = nil, textParts: [String: TextPartValue]) { + public init(aggregateNotificationEventArn: String? = nil, aggregationEventType: AggregationEventType? = nil, aggregationSummary: AggregationSummary? = nil, endTime: Date? = nil, eventStatus: EventStatus? = nil, id: String, media: [MediaElement], messageComponents: MessageComponents, notificationType: NotificationType, schemaVersion: SchemaVersion, sourceEventDetailUrl: String? = nil, sourceEventDetailUrlDisplayText: String? = nil, sourceEventMetadata: SourceEventMetadata, startTime: Date? = nil, textParts: [String: TextPartValue]) { self.aggregateNotificationEventArn = aggregateNotificationEventArn self.aggregationEventType = aggregationEventType + self.aggregationSummary = aggregationSummary self.endTime = endTime self.eventStatus = eventStatus self.id = id @@ -1206,6 +2223,7 @@ extension Notifications { private enum CodingKeys: String, CodingKey { case aggregateNotificationEventArn = "aggregateNotificationEventArn" case aggregationEventType = "aggregationEventType" + case aggregationSummary = "aggregationSummary" case endTime = "endTime" case eventStatus = "eventStatus" case id = "id" @@ -1222,7 +2240,7 @@ extension Notifications { } public struct NotificationEventSummary: AWSDecodableShape { - /// The notification event status. Values: HEALTHY All EventRules are ACTIVE and any call can be run. UNHEALTHY Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. + /// Provides additional information about the current status of the NotificationEvent. Values: HEALTHY All EventRules are ACTIVE. UNHEALTHY Some EventRules are ACTIVE and some are INACTIVE. public let eventStatus: EventStatus /// The message components of a notification event. public let messageComponents: MessageComponentsSummary @@ -1252,7 +2270,7 @@ extension Notifications { } public struct NotificationHubOverview: AWSDecodableShape { - /// The date and time the resource was created. + /// The date and time the NotificationHubOverview was created. @CustomCoding public var creationTime: Date /// The most recent time this NotificationHub had an ACTIVE status. @@ -1280,9 +2298,9 @@ extension Notifications { } public struct NotificationHubStatusSummary: AWSDecodableShape { - /// An Explanation for the current status. + /// An explanation for the current status. public let reason: String - /// Status information about the NotificationHub. Values: ACTIVE Incoming NotificationEvents are replicated to this NotificationHub. REGISTERING The NotificationHub is initializing. A NotificationHub with this status can't be deregistered. DEREGISTERING The NotificationHub is being deleted. You can't register additional NotificationHubs in the same Region as a NotificationHub with this status. + /// Status information about the NotificationHub. Values: ACTIVE Incoming NotificationEvents are replicated to this NotificationHub. REGISTERING The NotificationConfiguration is initializing. A NotificationConfiguration with this status can't be deregistered. DEREGISTERING The NotificationConfiguration is being deleted. You can't register additional NotificationHubs in the same Region as a NotificationConfiguration with this status. 
public let status: NotificationHubStatus @inlinable @@ -1297,6 +2315,20 @@ extension Notifications { } } + public struct NotificationsAccessForOrganization: AWSDecodableShape { + /// Access Status for the Orgs Service. + public let accessStatus: AccessStatus + + @inlinable + public init(accessStatus: AccessStatus) { + self.accessStatus = accessStatus + } + + private enum CodingKeys: String, CodingKey { + case accessStatus = "accessStatus" + } + } + public struct RegisterNotificationHubRequest: AWSEncodableShape { /// The Region of the NotificationHub. public let notificationHubRegion: String @@ -1326,7 +2358,7 @@ extension Notifications { public var lastActivationTime: Date? /// The Region of the NotificationHub. public let notificationHubRegion: String - /// NotificationHub status information. + /// Provides additional information about the current NotificationConfiguration status information. public let statusSummary: NotificationHubStatusSummary @inlinable @@ -1376,15 +2408,15 @@ extension Notifications { public let eventOccurrenceTime: Date /// The Region the event originated from. public let eventOriginRegion: String? - /// The type of event. For example, an AWS CloudWatch state change. + /// The type of event. For example, an Amazon CloudWatch state change. public let eventType: String /// The version of the type of event. public let eventTypeVersion: String - /// The Primary AWS account of Source Event + /// The primary Amazon Web Services account of SourceEvent. public let relatedAccount: String /// A list of resources related to this NotificationEvent. public let relatedResources: [Resource] - /// The AWS servvice the event originates from. For example aws.cloudwatch. + /// The Amazon Web Services service the event originates from. For example aws.cloudwatch. public let source: String /// The source event id. public let sourceEventId: String @@ -1416,9 +2448,9 @@ extension Notifications { public struct SourceEventMetadataSummary: AWSDecodableShape { /// The Region where the notification originated. Unavailable for aggregated notifications. public let eventOriginRegion: String? - /// The event type to match. Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// The event type to match. Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and Amazon CloudWatch Alarm State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. public let eventType: String - /// The matched event source. Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide. + /// The matched event source. Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide. public let source: String @inlinable @@ -1435,6 +2467,46 @@ extension Notifications { } } + public struct SummarizationDimensionDetail: AWSDecodableShape { + /// The name of the SummarizationDimensionDetail. 
+ public let name: String + /// Value of the property used to summarize aggregated events. + public let value: String + + @inlinable + public init(name: String, value: String) { + self.name = name + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case value = "value" + } + } + + public struct SummarizationDimensionOverview: AWSDecodableShape { + /// Total number of occurrences for this dimension. + public let count: Int + /// Name of the summarization dimension. + public let name: String + /// Indicates the sample values found within the dimension. + public let sampleValues: [String]? + + @inlinable + public init(count: Int, name: String, sampleValues: [String]? = nil) { + self.count = count + self.name = name + self.sampleValues = sampleValues + } + + private enum CodingKeys: String, CodingKey { + case count = "count" + case name = "name" + case sampleValues = "sampleValues" + } + } + public struct TagResourceRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) to use to tag a resource. public let arn: String @@ -1473,7 +2545,7 @@ extension Notifications { } public struct TextPartValue: AWSDecodableShape { - /// A short single line description of the link. Must be hyperlinked with the URL itself. Used for text parts with the type URL. + /// A short single line description of the link. Must be hyper-linked with the URL itself. Used for text parts with the type URL. public let displayText: String? /// A map of locales to the text in that locale. public let textByLocale: [LocaleCode: String]? @@ -1537,7 +2609,7 @@ extension Notifications { public let arn: String /// An additional event pattern used to further filter the events this EventRule receives. For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide. public let eventPattern: String? - /// A list of AWS Regions that sends events to this EventRule. + /// A list of Amazon Web Services Regions that send events to this EventRule. public let regions: [String]? @inlinable @@ -1595,7 +2667,7 @@ extension Notifications { } public struct UpdateNotificationConfigurationRequest: AWSEncodableShape { - /// The status of this NotificationConfiguration. The status should always be INACTIVE when part of the CreateNotificationConfiguration response. Values: ACTIVE All EventRules are ACTIVE and any call can be run. PARTIALLY_ACTIVE Some EventRules are ACTIVE and some are INACTIVE. Any call can be run. Any call can be run. INACTIVE All EventRules are INACTIVE and any call can be run. DELETING This NotificationConfiguration is being deleted. Only GET and LIST calls can be run. + /// The aggregation preference of the NotificationConfiguration. Values: LONG Aggregate notifications for long periods of time (12 hours). SHORT Aggregate notifications for short periods of time (5 minutes). NONE Don't aggregate notifications. public let aggregationDuration: AggregationDuration?  /// The Amazon Resource Name (ARN) used to update the NotificationConfiguration.
public let arn: String diff --git a/Sources/Soto/Services/OpenSearch/OpenSearch_api.swift b/Sources/Soto/Services/OpenSearch/OpenSearch_api.swift index bdec3649b0..d57d72a715 100644 --- a/Sources/Soto/Services/OpenSearch/OpenSearch_api.swift +++ b/Sources/Soto/Services/OpenSearch/OpenSearch_api.swift @@ -93,6 +93,7 @@ public struct OpenSearch: AWSService { "ap-southeast-3": "aos.ap-southeast-3.api.aws", "ap-southeast-4": "aos.ap-southeast-4.api.aws", "ap-southeast-5": "aos.ap-southeast-5.api.aws", + "ap-southeast-7": "aos.ap-southeast-7.api.aws", "ca-central-1": "aos.ca-central-1.api.aws", "ca-west-1": "aos.ca-west-1.api.aws", "cn-north-1": "aos.cn-north-1.api.amazonwebservices.com.cn", @@ -108,6 +109,7 @@ public struct OpenSearch: AWSService { "il-central-1": "aos.il-central-1.api.aws", "me-central-1": "aos.me-central-1.api.aws", "me-south-1": "aos.me-south-1.api.aws", + "mx-central-1": "aos.mx-central-1.api.aws", "sa-east-1": "aos.sa-east-1.api.aws", "us-east-1": "aos.us-east-1.api.aws", "us-east-2": "aos.us-east-2.api.aws", diff --git a/Sources/Soto/Services/Organizations/Organizations_api.swift b/Sources/Soto/Services/Organizations/Organizations_api.swift index 06a98f6c3d..98f9efeeff 100644 --- a/Sources/Soto/Services/Organizations/Organizations_api.swift +++ b/Sources/Soto/Services/Organizations/Organizations_api.swift @@ -82,6 +82,7 @@ public struct Organizations: AWSService { "aws-cn-global": "organizations.cn-northwest-1.amazonaws.com.cn", "aws-global": "organizations.us-east-1.amazonaws.com", "aws-iso-b-global": "organizations.us-isob-east-1.sc2s.sgov.gov", + "aws-iso-global": "organizations.us-iso-east-1.c2s.ic.gov", "aws-us-gov-global": "organizations.us-gov-west-1.amazonaws.com" ]} @@ -89,6 +90,7 @@ public struct Organizations: AWSService { static var partitionEndpoints: [AWSPartition: (endpoint: String, region: SotoCore.Region)] {[ .aws: (endpoint: "aws-global", region: .useast1), .awscn: (endpoint: "aws-cn-global", region: .cnnorthwest1), + .awsiso: (endpoint: "aws-iso-global", region: .usisoeast1), .awsisob: (endpoint: "aws-iso-b-global", region: .usisobeast1), .awsusgov: (endpoint: "aws-us-gov-global", region: .usgovwest1) ]} diff --git a/Sources/Soto/Services/Organizations/Organizations_shapes.swift b/Sources/Soto/Services/Organizations/Organizations_shapes.swift index a5a1bdd29b..54388fea5d 100644 --- a/Sources/Soto/Services/Organizations/Organizations_shapes.swift +++ b/Sources/Soto/Services/Organizations/Organizations_shapes.swift @@ -2746,7 +2746,7 @@ public struct OrganizationsErrorType: AWSErrorType { public static var concurrentModificationException: Self { .init(.concurrentModificationException) } /// The request failed because it conflicts with the current state of the specified resource. public static var conflictException: Self { .init(.conflictException) } - /// Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit: Some of the reasons in the following list might not be applicable to this specific API or operation. ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself. 
ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide. ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day. ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization. ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit. Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts. Deleted and closed accounts still count toward your limit. If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support. CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator. CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator. CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.​ CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator. CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days. CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time. ​ CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode. DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service. EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verfication code. HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day. INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. 
For more information, see Managing your Amazon Web Services payments. MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. All accounts in an organization must be associated with the same marketplace. MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support. MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again. MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide. MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal. MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time. MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource. MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required. ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation. OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep. OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization. POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size. POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization. SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first. TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account. WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, you must wait until at least seven days after the account was created. Invited accounts aren't subject to this waiting period. + /// Performing this operation violates a minimum or maximum value limit. 
For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit: Some of the reasons in the following list might not be applicable to this specific API or operation. ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself. ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide. ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day. ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization. ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit. Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts. Deleted and closed accounts still count toward your limit. If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support. ALL_FEATURES_MIGRATION_ORGANIZATION_SIZE_LIMIT_EXCEEDED: Your organization has more than 5000 accounts, and you can only use the standard migration process for organizations with less than 5000 accounts. Use the assisted migration process to enable all features mode, or create a support case for assistance if you are unable to use assisted migration. CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator. CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator. CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.​ CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator. CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days. CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time. 
CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode. DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service. EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code. HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day. INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments. MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. All accounts in an organization must be associated with the same marketplace. MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support. MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again. MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide. MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal. MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time. MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource. MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required. ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.
OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep. OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization. POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size. POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization. SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first. TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account. WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, you must wait until at least seven days after the account was created. Invited accounts aren't subject to this waiting period. public static var constraintViolationException: Self { .init(.constraintViolationException) } /// We can't find a create account request with the CreateAccountRequestId that you specified. public static var createAccountStatusNotFoundException: Self { .init(.createAccountStatusNotFoundException) } diff --git a/Sources/Soto/Services/Outposts/Outposts_api.swift b/Sources/Soto/Services/Outposts/Outposts_api.swift index f952b129bd..450bb349f4 100644 --- a/Sources/Soto/Services/Outposts/Outposts_api.swift +++ b/Sources/Soto/Services/Outposts/Outposts_api.swift @@ -976,7 +976,7 @@ public struct Outposts: AWSService { return try await self.listTagsForResource(input, logger: logger) } - /// Starts the specified capacity task. You can have one active capacity task per order or Outpost. + /// Starts the specified capacity task. You can have one active capacity task for each order and each Outpost. @Sendable @inlinable public func startCapacityTask(_ input: StartCapacityTaskInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartCapacityTaskOutput { @@ -989,7 +989,7 @@ public struct Outposts: AWSService { logger: logger ) } - /// Starts the specified capacity task. You can have one active capacity task per order or Outpost. + /// Starts the specified capacity task. You can have one active capacity task for each order and each Outpost. /// /// Parameters: /// - dryRun: You can request a dry run to determine if the instance type and instance size changes are above or below available instance capacity. Requesting a dry run does not make any changes to your plan. @@ -1252,7 +1252,7 @@ public struct Outposts: AWSService { /// - fiberOpticCableType: The type of fiber that you will use to attach the Outpost to your network. /// - maximumSupportedWeightLbs: The maximum rack weight that this site can support. NO_LIMIT is over 2000lbs. /// - opticalStandard: The type of optical standard that you will use to attach the Outpost to your network. This field is dependent on uplink speed, fiber type, and distance to the upstream device. For more information about networking requirements for racks, see Network in the Amazon Web Services Outposts User Guide.
OPTIC_10GBASE_SR: 10GBASE-SR OPTIC_10GBASE_IR: 10GBASE-IR OPTIC_10GBASE_LR: 10GBASE-LR OPTIC_40GBASE_SR: 40GBASE-SR OPTIC_40GBASE_ESR: 40GBASE-ESR OPTIC_40GBASE_IR4_LR4L: 40GBASE-IR (LR4L) OPTIC_40GBASE_LR4: 40GBASE-LR4 OPTIC_100GBASE_SR4: 100GBASE-SR4 OPTIC_100GBASE_CWDM4: 100GBASE-CWDM4 OPTIC_100GBASE_LR4: 100GBASE-LR4 OPTIC_100G_PSM4_MSA: 100G PSM4 MSA OPTIC_1000BASE_LX: 1000Base-LX OPTIC_1000BASE_SX : 1000Base-SX - /// - powerConnector: The power connector that Amazon Web Services should plan to provide for connections to the hardware. Note the correlation between PowerPhase and PowerConnector. Single-phase AC feed L6-30P – (common in US); 30A; single phase IEC309 (blue) – P+N+E, 6hr; 32 A; single phase Three-phase AC feed AH530P7W (red) – 3P+N+E, 7hr; 30A; three phase AH532P6W (red) – 3P+N+E, 6hr; 32A; three phase + /// - powerConnector: The power connector that Amazon Web Services should plan to provide for connections to the hardware. Note the correlation between PowerPhase and PowerConnector. Single-phase AC feed L6-30P – (common in US); 30A; single phase IEC309 (blue) – P+N+E, 6hr; 32 A; single phase Three-phase AC feed AH530P7W (red) – 3P+N+E, 7hr; 30A; three phase AH532P6W (red) – 3P+N+E, 6hr; 32A; three phase CS8365C – (common in US); 3P+E, 50A; three phase /// - powerDrawKva: The power draw, in kVA, available at the hardware placement position for the rack. /// - powerFeedDrop: Indicates whether the power feed comes above or below the rack. /// - powerPhase: The power option that you can provide for hardware. Single-phase AC feed: 200 V to 277 V, 50 Hz or 60 Hz Three-phase AC feed: 346 V to 480 V, 50 Hz or 60 Hz diff --git a/Sources/Soto/Services/Outposts/Outposts_shapes.swift b/Sources/Soto/Services/Outposts/Outposts_shapes.swift index c0256d5d8c..0e842ffddd 100644 --- a/Sources/Soto/Services/Outposts/Outposts_shapes.swift +++ b/Sources/Soto/Services/Outposts/Outposts_shapes.swift @@ -176,6 +176,7 @@ extension Outposts { public enum PowerConnector: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case ah530p7w = "AH530P7W" case ah532p6w = "AH532P6W" + case cs8365c = "CS8365C" case iec309 = "IEC309" case l630P = "L6_30P" public var description: String { return self.rawValue } @@ -2794,7 +2795,7 @@ extension Outposts { public let maximumSupportedWeightLbs: MaximumSupportedWeightLbs? /// The type of optical standard that you will use to attach the Outpost to your network. This field is dependent on uplink speed, fiber type, and distance to the upstream device. For more information about networking requirements for racks, see Network in the Amazon Web Services Outposts User Guide. OPTIC_10GBASE_SR: 10GBASE-SR OPTIC_10GBASE_IR: 10GBASE-IR OPTIC_10GBASE_LR: 10GBASE-LR OPTIC_40GBASE_SR: 40GBASE-SR OPTIC_40GBASE_ESR: 40GBASE-ESR OPTIC_40GBASE_IR4_LR4L: 40GBASE-IR (LR4L) OPTIC_40GBASE_LR4: 40GBASE-LR4 OPTIC_100GBASE_SR4: 100GBASE-SR4 OPTIC_100GBASE_CWDM4: 100GBASE-CWDM4 OPTIC_100GBASE_LR4: 100GBASE-LR4 OPTIC_100G_PSM4_MSA: 100G PSM4 MSA OPTIC_1000BASE_LX: 1000Base-LX OPTIC_1000BASE_SX : 1000Base-SX public let opticalStandard: OpticalStandard? - /// The power connector that Amazon Web Services should plan to provide for connections to the hardware. Note the correlation between PowerPhase and PowerConnector. 
Single-phase AC feed L6-30P – (common in US); 30A; single phase IEC309 (blue) – P+N+E, 6hr; 32 A; single phase Three-phase AC feed AH530P7W (red) – 3P+N+E, 7hr; 30A; three phase AH532P6W (red) – 3P+N+E, 6hr; 32A; three phase + /// The power connector that Amazon Web Services should plan to provide for connections to the hardware. Note the correlation between PowerPhase and PowerConnector. Single-phase AC feed L6-30P – (common in US); 30A; single phase IEC309 (blue) – P+N+E, 6hr; 32 A; single phase Three-phase AC feed AH530P7W (red) – 3P+N+E, 7hr; 30A; three phase AH532P6W (red) – 3P+N+E, 6hr; 32A; three phase CS8365C – (common in US); 3P+E, 50A; three phase public let powerConnector: PowerConnector? /// The power draw, in kVA, available at the hardware placement position for the rack. public let powerDrawKva: PowerDrawKva? diff --git a/Sources/Soto/Services/PI/PI_api.swift b/Sources/Soto/Services/PI/PI_api.swift index 18975ab794..ea23bc6e97 100644 --- a/Sources/Soto/Services/PI/PI_api.swift +++ b/Sources/Soto/Services/PI/PI_api.swift @@ -94,6 +94,7 @@ public struct PI: AWSService { "ap-southeast-3": "pi.ap-southeast-3.api.aws", "ap-southeast-4": "pi.ap-southeast-4.api.aws", "ap-southeast-5": "pi.ap-southeast-5.api.aws", + "ap-southeast-7": "pi.ap-southeast-7.api.aws", "ca-central-1": "pi.ca-central-1.api.aws", "ca-west-1": "pi.ca-west-1.api.aws", "cn-north-1": "pi.cn-north-1.api.amazonwebservices.com.cn", @@ -109,6 +110,7 @@ public struct PI: AWSService { "il-central-1": "pi.il-central-1.api.aws", "me-central-1": "pi.me-central-1.api.aws", "me-south-1": "pi.me-south-1.api.aws", + "mx-central-1": "pi.mx-central-1.api.aws", "sa-east-1": "pi.sa-east-1.api.aws", "us-east-1": "pi.us-east-1.api.aws", "us-east-2": "pi.us-east-2.api.aws", diff --git a/Sources/Soto/Services/PartnerCentralSelling/PartnerCentralSelling_api.swift b/Sources/Soto/Services/PartnerCentralSelling/PartnerCentralSelling_api.swift index ab2ea805e3..90052d6418 100644 --- a/Sources/Soto/Services/PartnerCentralSelling/PartnerCentralSelling_api.swift +++ b/Sources/Soto/Services/PartnerCentralSelling/PartnerCentralSelling_api.swift @@ -80,7 +80,7 @@ public struct PartnerCentralSelling: AWSService { // MARK: API Calls - /// Use the AcceptEngagementInvitation action to accept an engagement invitation shared by AWS. Accepting the invitation indicates your willingness to participate in the engagement, granting you access to all engagement-related data. + /// Use the AcceptEngagementInvitation action to accept an engagement invitation shared by AWS. Accepting the invitation indicates your willingness to participate in the engagement, granting you access to all engagement-related data. @Sendable @inlinable public func acceptEngagementInvitation(_ input: AcceptEngagementInvitationRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -93,11 +93,11 @@ public struct PartnerCentralSelling: AWSService { logger: logger ) } - /// Use the AcceptEngagementInvitation action to accept an engagement invitation shared by AWS. Accepting the invitation indicates your willingness to participate in the engagement, granting you access to all engagement-related data. + /// Use the AcceptEngagementInvitation action to accept an engagement invitation shared by AWS. Accepting the invitation indicates your willingness to participate in the engagement, granting you access to all engagement-related data. /// /// Parameters: - /// - catalog: The CatalogType parameter specifies the catalog associated with the engagement invitation. 
Accepted values are AWS and Sandbox, which determine the environment in which the engagement invitation is managed. - /// - identifier: The Identifier parameter in the AcceptEngagementInvitationRequest specifies the unique identifier of the EngagementInvitation to be accepted. Providing the correct identifier ensures that the intended invitation is accepted. + /// - catalog: The CatalogType parameter specifies the catalog associated with the engagement invitation. Accepted values are AWS and Sandbox, which determine the environment in which the engagement invitation is managed. + /// - identifier: The Identifier parameter in the AcceptEngagementInvitationRequest specifies the unique identifier of the EngagementInvitation to be accepted. Providing the correct identifier ensures that the intended invitation is accepted. /// - logger: Logger use during operation @inlinable public func acceptEngagementInvitation( @@ -185,7 +185,7 @@ public struct PartnerCentralSelling: AWSService { return try await self.associateOpportunity(input, logger: logger) } - /// The CreateEngagement action allows you to create an Engagement, which serves as a collaborative space between different parties such as AWS Partners and AWS Sellers. This action automatically adds the caller's AWS account as an active member of the newly created Engagement. + /// The CreateEngagement action allows you to create an Engagement, which serves as a collaborative space between different parties such as AWS Partners and AWS Sellers. This action automatically adds the caller's AWS account as an active member of the newly created Engagement. @Sendable @inlinable public func createEngagement(_ input: CreateEngagementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEngagementResponse { @@ -198,12 +198,12 @@ public struct PartnerCentralSelling: AWSService { logger: logger ) } - /// The CreateEngagement action allows you to create an Engagement, which serves as a collaborative space between different parties such as AWS Partners and AWS Sellers. This action automatically adds the caller's AWS account as an active member of the newly created Engagement. + /// The CreateEngagement action allows you to create an Engagement, which serves as a collaborative space between different parties such as AWS Partners and AWS Sellers. This action automatically adds the caller's AWS account as an active member of the newly created Engagement. /// /// Parameters: - /// - catalog: The CreateEngagementRequest$Catalog parameter specifies the catalog related to the engagement. Accepted values are AWS and Sandbox, which determine the environment in which the engagement is managed. - /// - clientToken: The CreateEngagementRequest$ClientToken parameter specifies a unique, case-sensitive identifier to ensure that the request is handled exactly once. The value must not exceed sixty-four alphanumeric characters. - /// - contexts: The Contexts field is a required array of objects, with a maximum of 5 contexts allowed, specifying detailed information about customer projects associated with the Engagement. Each context object contains a Type field indicating the context type, which must be CustomerProject in this version, and a Payload field containing the CustomerProject details. The CustomerProject object is composed of two main components: Customer and Project. The Customer object includes information such as CompanyName, WebsiteUrl, Industry, and CountryCode, providing essential details about the customer. 
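A minimal sketch of the invitation-acceptance call documented above, assuming the generated convenience wrapper; the invitation identifier is a made-up placeholder:

import SotoPartnerCentralSelling

let client = AWSClient()
let partnerCentral = PartnerCentralSelling(client: client, region: .useast1)
// Pass catalog "Sandbox" instead of "AWS" to exercise the flow in the test catalog.
try await partnerCentral.acceptEngagementInvitation(
    catalog: "AWS",
    identifier: "engi-EXAMPLE12345" // hypothetical invitation identifier
)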
The Project object contains Title, BusinessProblem, and TargetCompletionDate, offering insights into the specific project associated with the customer. This structure allows comprehensive context to be included within the Engagement, facilitating effective collaboration between parties by providing relevant customer and project information. + /// - catalog: The CreateEngagementRequest$Catalog parameter specifies the catalog related to the engagement. Accepted values are AWS and Sandbox, which determine the environment in which the engagement is managed. + /// - clientToken: The CreateEngagementRequest$ClientToken parameter specifies a unique, case-sensitive identifier to ensure that the request is handled exactly once. The value must not exceed sixty-four alphanumeric characters. + /// - contexts: The Contexts field is a required array of objects, with a maximum of 5 contexts allowed, specifying detailed information about customer projects associated with the Engagement. Each context object contains a Type field indicating the context type, which must be CustomerProject in this version, and a Payload field containing the CustomerProject details. The CustomerProject object is composed of two main components: Customer and Project. The Customer object includes information such as CompanyName, WebsiteUrl, Industry, and CountryCode, providing essential details about the customer. The Project object contains Title, BusinessProblem, and TargetCompletionDate, offering insights into the specific project associated with the customer. This structure allows comprehensive context to be included within the Engagement, facilitating effective collaboration between parties by providing relevant customer and project information. /// - description: Provides a description of the Engagement. /// - title: Specifies the title of the Engagement. /// - logger: Logger use during operation @@ -226,7 +226,7 @@ public struct PartnerCentralSelling: AWSService { return try await self.createEngagement(input, logger: logger) } - /// This action creates an invitation from a sender to a single receiver to join an engagement. + /// This action creates an invitation from a sender to a single receiver to join an engagement. @Sendable @inlinable public func createEngagementInvitation(_ input: CreateEngagementInvitationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEngagementInvitationResponse { @@ -239,13 +239,13 @@ public struct PartnerCentralSelling: AWSService { logger: logger ) } - /// This action creates an invitation from a sender to a single receiver to join an engagement. + /// This action creates an invitation from a sender to a single receiver to join an engagement. /// /// Parameters: - /// - catalog: Specifies the catalog related to the engagement. Accepted values are AWS and Sandbox, which determine the environment in which the engagement is managed. - /// - clientToken: Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. This token helps prevent duplicate invitation creations. - /// - engagementIdentifier: The unique identifier of the Engagement associated with the invitation. This parameter ensures the invitation is created within the correct Engagement context. - /// - invitation: The Invitation object all information necessary to initiate an engagement invitation to a partner. + /// - catalog: Specifies the catalog related to the engagement. 
Accepted values are AWS and Sandbox, which determine the environment in which the engagement is managed. + /// - clientToken: Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. This token helps prevent duplicate invitation creations. + /// - engagementIdentifier: The unique identifier of the Engagement associated with the invitation. This parameter ensures the invitation is created within the correct Engagement context. + /// - invitation: The Invitation object contains all information necessary to initiate an engagement invitation to a partner. It contains a personalized message from the sender, the invitation's receiver, and a payload. The Payload can be the OpportunityInvitation, which includes detailed structures for sender contacts, partner responsibilities, customer information, and project details. /// - logger: Logger use during operation @inlinable public func createEngagementInvitation( @@ -264,7 +264,7 @@ return try await self.createEngagementInvitation(input, logger: logger) } - /// Creates an Opportunity record in Partner Central. Use this operation to create a potential business opportunity for submission to Amazon Web Services. Creating an opportunity sets Lifecycle.ReviewStatus to Pending Submission. To submit an opportunity, follow these steps: To create the opportunity, use CreateOpportunity. To associate a solution with the opportunity, use AssociateOpportunity. To submit the opportunity, use StartEngagementFromOpportunityTask.
After submission, you can't edit the opportunity until the review is complete. But opportunities in the Pending Submission state must have complete details. You can update the opportunity while it's in the Pending Submission state. There's a set of mandatory fields to create opportunities, but consider providing optional fields to enrich the opportunity record. + /// Creates an Opportunity record in Partner Central. Use this operation to create a potential business opportunity for submission to Amazon Web Services. Creating an opportunity sets Lifecycle.ReviewStatus to Pending Submission. To submit an opportunity, follow these steps: To create the opportunity, use CreateOpportunity. To associate a solution with the opportunity, use AssociateOpportunity. To start the engagement with AWS, use StartEngagementFromOpportunity. After submission, you can't edit the opportunity until the review is complete. But opportunities in the Pending Submission state must have complete details. You can update the opportunity while it's in the Pending Submission state. There's a set of mandatory fields to create opportunities, but consider providing optional fields to enrich the opportunity record. /// /// Parameters: /// - catalog: Specifies the catalog associated with the request. This field takes a string value from a predefined list: AWS or Sandbox. The catalog determines which environment the opportunity is created in. Use AWS to create opportunities in the Amazon Web Services catalog, and Sandbox for testing in secure, isolated environments. @@ -290,7 +290,7 @@ public struct PartnerCentralSelling: AWSService { /// - opportunityType: Specifies the opportunity type as a renewal, new, or expansion. Opportunity types: New opportunity: Represents a new business opportunity with a potential customer that's not previously engaged with your solutions or services. Renewal opportunity: Represents an opportunity to renew an existing contract or subscription with a current customer, ensuring continuity of service. Expansion opportunity: Represents an opportunity to expand the scope of an existing contract or subscription, either by adding new services or increasing the volume of existing services for a current customer. /// - origin: Specifies the origin of the opportunity, indicating if it was sourced from Amazon Web Services or the partner. For all opportunities created with Catalog: AWS, this field must only be Partner Referral. However, when using Catalog: Sandbox, you can set this field to AWS Referral to simulate Amazon Web Services referral creation. This allows Amazon Web Services-originated flows testing in the sandbox catalog. /// - partnerOpportunityIdentifier: Specifies the opportunity's unique identifier in the partner's CRM system. This value is essential to track and reconcile because it's included in the outbound payload to the partner. This field allows partners to link an opportunity to their CRM, which helps to ensure seamless integration and accurate synchronization between the Partner Central API and the partner's internal systems. - /// - primaryNeedsFromAws: Identifies the type of support the partner needs from Amazon Web Services. Valid values: Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks. Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation. 
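A rough sketch of the three-step submission flow described above, reusing the partnerCentral client from the earlier sketch; the solution identifier, enum cases, and response field name are assumptions rather than confirmed API details:

// Step 1: create the opportunity. A real record needs complete details before
// review; most optional fields are omitted from this sketch.
let created = try await partnerCentral.createOpportunity(
    catalog: "AWS",
    origin: .partnerReferral // assumed enum case for "Partner Referral"
)
// Step 2: associate a solution with the new opportunity.
try await partnerCentral.associateOpportunity(
    catalog: "AWS",
    opportunityIdentifier: created.id,    // assumed response field
    relatedEntityIdentifier: "S-0000000", // hypothetical solution ID
    relatedEntityType: .solutions         // assumed enum case
)
// Step 3: start the engagement with AWS via the engagement-from-opportunity
// task API referenced in the doc text above.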
Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution. Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals). Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution. Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment. Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning). Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs Amazon Web Services RFx support. Do Not Need Support from AWS Sales Rep: Indicates that a partner doesn't need support from an Amazon Web Services sales representative, and the partner solely manages the opportunity. It's possible to request coselling support on these opportunities at any stage during their lifecycles. This is also known as a for-visibility-only (FVO) opportunity. + /// - primaryNeedsFromAws: Identifies the type of support the partner needs from Amazon Web Services. Valid values: Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks. Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation. Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution. Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals). Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution. Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment. Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning). Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs Amazon Web Services RFx support. /// - project: An object that contains project details for the Opportunity. /// - softwareRevenue: Specifies details of a customer's procurement terms. This is required only for partners in eligible programs. /// - logger: Logger use during operation @@ -329,7 +329,7 @@ public struct PartnerCentralSelling: AWSService { return try await self.createOpportunity(input, logger: logger) } - /// This action allows you to create an immutable snapshot of a specific resource, such as an opportunity, within the context of an engagement. The snapshot captures a subset of the resource's data based on the schema defined by the provided template. + /// This action allows you to create an immutable snapshot of a specific resource, such as an opportunity, within the context of an engagement. 
The snapshot captures a subset of the resource's data based on the schema defined by the provided template. + /// This action allows you to create an immutable snapshot of a specific resource, such as an opportunity, within the context of an engagement. The snapshot captures a subset of the resource's data based on the schema defined by the provided template. @Sendable @inlinable public func createResourceSnapshot(_ input: CreateResourceSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateResourceSnapshotResponse { @@ -342,11 +342,11 @@ logger: logger ) } - /// This action allows you to create an immutable snapshot of a specific resource, such as an opportunity, within the context of an engagement. The snapshot captures a subset of the resource's data based on the schema defined by the provided template. + /// This action allows you to create an immutable snapshot of a specific resource, such as an opportunity, within the context of an engagement. The snapshot captures a subset of the resource's data based on the schema defined by the provided template. /// /// Parameters: /// - catalog: Specifies the catalog where the snapshot is created. Valid values are AWS and Sandbox. - /// - clientToken: Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. This token helps prevent duplicate snapshot creations. + /// - clientToken: Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. This token helps prevent duplicate snapshot creations. /// - engagementIdentifier: The unique identifier of the engagement associated with this snapshot. This field links the snapshot to a specific engagement context. /// - resourceIdentifier: The unique identifier of the specific resource to be snapshotted. The format and constraints of this identifier depend on the ResourceType specified. For example: For Opportunity type, it will be an opportunity ID. /// - resourceSnapshotTemplateIdentifier: The name of the template that defines the schema for the snapshot. This template determines which subset of the resource data will be included in the snapshot. Must correspond to an existing and valid template for the specified ResourceType. @@ -373,7 +373,7 @@ return try await self.createResourceSnapshot(input, logger: logger) } - /// Use this action to create a job to generate a snapshot of the specified resource within an engagement. It initiates an asynchronous process to create a resource snapshot. The job creates a new snapshot only if the resource state has changed, adhering to the same access control and immutability rules as direct snapshot creation.
+ /// Use this action to create a job to generate a snapshot of the specified resource within an engagement. It initiates an asynchronous process to create a resource snapshot. The job creates a new snapshot only if the resource state has changed, adhering to the same access control and immutability rules as direct snapshot creation. /// /// Parameters: - /// - catalog: Specifies the catalog in which to create the snapshot job. Valid values are AWS and Sandbox. - /// - clientToken: Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. This token helps prevent duplicate snapshot job creations. - /// - engagementIdentifier: Specifies the identifier of the engagement associated with the resource to be snapshotted. - /// - resourceIdentifier: Specifies the identifier of the specific resource to be snapshotted. The format depends on the ResourceType. - /// - resourceSnapshotTemplateIdentifier: Specifies the name of the template that defines the schema for the snapshot. - /// - resourceType: The type of resource for which the snapshot job is being created. Must be one of the supported resource types Opportunity. + /// - catalog: Specifies the catalog in which to create the snapshot job. Valid values are AWS and Sandbox. + /// - clientToken: A client-generated UUID used for the idempotency check. The token helps prevent duplicate job creations. + /// - engagementIdentifier: Specifies the identifier of the engagement associated with the resource to be snapshotted. + /// - resourceIdentifier: Specifies the identifier of the specific resource to be snapshotted. The format depends on the ResourceType. + /// - resourceSnapshotTemplateIdentifier: Specifies the name of the template that defines the schema for the snapshot. + /// - resourceType: The type of resource for which the snapshot job is being created. Must be one of the supported resource types, i.e., Opportunity. + /// - tags: A list of objects specifying each tag name and value. /// - logger: Logger use during operation @inlinable public func createResourceSnapshotJob( @@ -404,6 +405,7 @@ resourceIdentifier: String, resourceSnapshotTemplateIdentifier: String, resourceType: ResourceType, + tags: [Tag]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateResourceSnapshotJobResponse { let input = CreateResourceSnapshotJobRequest( @@ -412,7 +414,8 @@ engagementIdentifier: engagementIdentifier, resourceIdentifier: resourceIdentifier, resourceSnapshotTemplateIdentifier: resourceSnapshotTemplateIdentifier, - resourceType: resourceType + resourceType: resourceType, + tags: tags ) return try await self.createResourceSnapshotJob(input, logger: logger) } @@ -519,7 +522,7 @@ return try await self.getAwsOpportunitySummary(input, logger: logger) } - /// Use this action to retrieve the engagement record for a given EngagementIdentifier. + /// Use this action to retrieve the engagement record for a given EngagementIdentifier.
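The new tags parameter threads straight through to CreateResourceSnapshotJobRequest, as the hunk above shows. A sketch with made-up identifiers, reusing the partnerCentral client from the earlier sketch; the Tag(key:value:) initializer and the .opportunity case are assumptions:

let job = try await partnerCentral.createResourceSnapshotJob(
    catalog: "AWS",
    engagementIdentifier: "eng-EXAMPLE12345",         // hypothetical
    resourceIdentifier: "O1234567",                   // hypothetical Opportunity ID
    resourceSnapshotTemplateIdentifier: "MyTemplate", // hypothetical template name
    resourceType: .opportunity,
    tags: [Tag(key: "team", value: "alliances")]      // new parameter in this diff
)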
/// /// Parameters: - /// - catalog: Specifies the catalog related to the engagement request. Valid values are AWS and Sandbox. - /// - identifier: Specifies the identifier of the Engagement record to retrieve. + /// - catalog: Specifies the catalog related to the engagement request. Valid values are AWS and Sandbox. + /// - identifier: Specifies the identifier of the Engagement record to retrieve. /// - logger: Logger use during operation @inlinable public func getEngagement( @@ -659,7 +662,7 @@ return try await self.getResourceSnapshot(input, logger: logger) } - /// Use this action to retrieves information about a specific resource snapshot job. + /// Use this action to retrieve information about a specific resource snapshot job. @Sendable @inlinable public func getResourceSnapshotJob(_ input: GetResourceSnapshotJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetResourceSnapshotJobResponse { @@ -672,11 +675,11 @@ logger: logger ) } - /// Use this action to retrieves information about a specific resource snapshot job. + /// Use this action to retrieve information about a specific resource snapshot job. /// /// Parameters: - /// - catalog: Specifies the catalog related to the request. Valid values are: - /// - resourceSnapshotJobIdentifier: The unique identifier of the resource snapshot job to be retrieved. This identifier is crucial for pinpointing the specific job you want to query. + /// - catalog: Specifies the catalog related to the request. Valid values are: AWS: Retrieves the snapshot job from the production AWS environment. Sandbox: Retrieves the snapshot job from a sandbox environment used for testing or development purposes. + /// - resourceSnapshotJobIdentifier: The unique identifier of the resource snapshot job to be retrieved. This identifier is crucial for pinpointing the specific job you want to query. /// - logger: Logger use during operation @inlinable public func getResourceSnapshotJob( @@ -736,7 +739,7 @@ /// Lists all in-progress, completed, or failed StartEngagementByAcceptingInvitationTask tasks that were initiated by the caller's account. /// /// Parameters: - /// - catalog: Specifies the catalog related to the request. Valid values are: + /// - catalog: Specifies the catalog related to the request. Valid values are: AWS: Retrieves the request from the production AWS environment.
Sandbox: Retrieves the request from a sandbox environment used for testing or development purposes. /// - engagementIdentifier: Filters tasks by the identifiers of the engagements they created or are associated with. /// - maxResults: Specifies the maximum number of results to return in a single page of the response. Use this parameter to control the number of items returned in each request, which can be useful for performance tuning and managing large result sets. /// - nextToken: The token for requesting the next page of results. This value is obtained from the NextToken field in the response of a previous call to this API. Use this parameter for pagination when the result set spans multiple pages. @@ -873,7 +876,7 @@ return try await self.listEngagementInvitations(input, logger: logger) } - /// Retrieves the details of member partners in an engagement. This operation can only be invoked by members of the engagement. The ListEngagementMembers operation allows you to fetch information about the members of a specific engagement. This action is restricted to members of the engagement being queried. + /// Retrieves the details of member partners in an Engagement. This operation can only be invoked by members of the Engagement. The ListEngagementMembers operation allows you to fetch information about the members of a specific Engagement. This action is restricted to members of the Engagement being queried. @Sendable @inlinable public func listEngagementMembers(_ input: ListEngagementMembersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListEngagementMembersResponse { @@ -886,13 +889,13 @@ logger: logger ) } - /// Retrieves the details of member partners in an engagement. This operation can only be invoked by members of the engagement. The ListEngagementMembers operation allows you to fetch information about the members of a specific engagement. This action is restricted to members of the engagement being queried. + /// Retrieves the details of member partners in an Engagement. This operation can only be invoked by members of the Engagement. The ListEngagementMembers operation allows you to fetch information about the members of a specific Engagement. This action is restricted to members of the Engagement being queried. /// /// Parameters: - /// - catalog: The catalog related to the request. - /// - identifier: Identifier of the engagement record to retrieve members from. - /// - maxResults: The maximum number of results to return in a single call. - /// - nextToken: The token for the next set of results. + /// - catalog: The catalog related to the request. + /// - identifier: Identifier of the Engagement record to retrieve members from. + /// - maxResults: The maximum number of results to return in a single call. + /// - nextToken: The token for the next set of results. /// - logger: Logger use during operation @inlinable public func listEngagementMembers( @@ -911,7 +914,7 @@ return try await self.listEngagementMembers(input, logger: logger) } - /// Lists the associations between resources and engagements where the caller is a member and has at least one snapshot in the engagement. + /// Lists the associations between resources and engagements where the caller is a member and has at least one snapshot in the engagement.
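A sketch of the member-listing call documented above, reusing the partnerCentral client from the earlier sketch; the engagement identifier is a placeholder:

let members = try await partnerCentral.listEngagementMembers(
    catalog: "AWS",
    identifier: "eng-EXAMPLE12345", // hypothetical engagement identifier
    maxResults: 10
)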
@Sendable @inlinable public func listEngagementResourceAssociations(_ input: ListEngagementResourceAssociationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListEngagementResourceAssociationsResponse { @@ -924,15 +927,15 @@ logger: logger ) } - /// Lists the associations between resources and engagements where the caller is a member and has at least one snapshot in the engagement. + /// Lists the associations between resources and engagements where the caller is a member and has at least one snapshot in the engagement. /// /// Parameters: - /// - catalog: Specifies the catalog in which to search for engagement-resource associations. - /// - createdBy: Filters the results to include only associations with resources owned by the specified AWS account. Use this when you want to find associations related to resources owned by a particular account. - /// - engagementIdentifier: Filters the results to include only associations related to the specified engagement. Use this when you want to find all resources associated with a specific engagement. - /// - maxResults: Limits the number of results returned in a single call. Use this to control the number of results returned, especially useful for pagination. - /// - nextToken: A token used for pagination of results. Include this token in subsequent requests to retrieve the next set of results. - /// - resourceIdentifier: Filters the results to include only associations with the specified resource. Varies depending on the resource type. Use this when you want to find all engagements associated with a specific resource. + /// - catalog: Specifies the catalog in which to search for engagement-resource associations. Valid values: "AWS" for production environments, or "Sandbox" for testing and development purposes. + /// - createdBy: Filters the response to include only snapshots of resources owned by the specified AWS account ID. Use this when you want to find associations related to resources owned by a particular account. + /// - engagementIdentifier: Filters the results to include only associations related to the specified engagement. Use this when you want to find all resources associated with a specific engagement. + /// - maxResults: Limits the number of results returned in a single call. Use this to control the number of results returned, especially useful for pagination. + /// - nextToken: A token used for pagination of results. Include this token in subsequent requests to retrieve the next set of results. + /// - resourceIdentifier: Filters the results to include only associations with the specified resource. Varies depending on the resource type. Use this when you want to find all engagements associated with a specific resource. /// - resourceType: Filters the results to include only associations with resources of the specified type. /// - logger: Logger use during operation @inlinable @@ -958,7 +961,7 @@ return try await self.listEngagementResourceAssociations(input, logger: logger) } - /// This action allows users to retrieve a list of engagement records from Partner Central. This action can be used to manage and track various engagements across different stages of the partner selling process. + /// This action allows users to retrieve a list of Engagement records from Partner Central. This action can be used to manage and track various engagements across different stages of the partner selling process.
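The association filters documented above are all optional parameters on the convenience wrapper; a sketch reusing the partnerCentral client from the earlier sketch (omitted filters such as createdBy, resourceIdentifier, and resourceType default to nil):

let associations = try await partnerCentral.listEngagementResourceAssociations(
    catalog: "AWS",
    engagementIdentifier: "eng-EXAMPLE12345", // hypothetical
    maxResults: 25
)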
@Sendable @inlinable public func listEngagements(_ input: ListEngagementsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListEngagementsResponse { @@ -971,15 +974,15 @@ public struct PartnerCentralSelling: AWSService { logger: logger ) } - /// This action allows users to retrieve a list of engagement records from Partner Central. This action can be used to manage and track various engagements across different stages of the partner selling process. + /// This action allows users to retrieve a list of Engagement records from Partner Central. This action can be used to manage and track various engagements across different stages of the partner selling process. /// /// Parameters: /// - catalog: Specifies the catalog related to the request. /// - createdBy: A list of AWS account IDs. When specified, the response includes engagements created by these accounts. This filter is useful for finding engagements created by specific team members. - /// - engagementIdentifier: An array of strings representing engagement identifiers to retrieve. - /// - excludeCreatedBy: An array of strings representing AWS Account IDs. Use this to exclude engagements created by specific users. - /// - maxResults: The maximum number of results to return in a single call. - /// - nextToken: The token for the next set of results. This value is returned from a previous call. + /// - engagementIdentifier: An array of strings representing engagement identifiers to retrieve. + /// - excludeCreatedBy: An array of strings representing AWS Account IDs. Use this to exclude engagements created by specific users. + /// - maxResults: The maximum number of results to return in a single call. + /// - nextToken: The token for the next set of results. This value is returned from a previous call. /// - sort: An object that specifies the sort order of the results. /// - logger: Logger use during operation @inlinable @@ -1102,7 +1105,7 @@ public struct PartnerCentralSelling: AWSService { return try await self.listResourceSnapshotJobs(input, logger: logger) } - /// Retrieves a list of resource view snapshots based on specified criteria. + /// Retrieves a list of resource view snapshots based on specified criteria. This operation supports various use cases, including: Fetching all snapshots associated with an engagement. Retrieving snapshots of a specific resource type within an engagement. Obtaining snapshots for a particular resource using a specified template. Accessing the latest snapshot of a resource within an engagement. Filtering snapshots by resource owner. @Sendable @inlinable public func listResourceSnapshots(_ input: ListResourceSnapshotsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListResourceSnapshotsResponse { @@ -1115,16 +1118,16 @@ public struct PartnerCentralSelling: AWSService { logger: logger ) } - /// Retrieves a list of resource view snapshots based on specified criteria. + /// Retrieves a list of resource view snapshots based on specified criteria. This operation supports various use cases, including: Fetching all snapshots associated with an engagement. Retrieving snapshots of a specific resource type within an engagement. Obtaining snapshots for a particular resource using a specified template. Accessing the latest snapshot of a resource within an engagement. Filtering snapshots by resource owner. /// /// Parameters: /// - catalog: Specifies the catalog related to the request. 
- /// - createdBy: Filters the response to include only snapshots of resources created by the specified AWS account. + /// - createdBy: Filters the response to include only snapshots of resources owned by the specified AWS account. /// - engagementIdentifier: The unique identifier of the engagement associated with the snapshots. /// - maxResults: The maximum number of results to return in a single call. /// - nextToken: The token for the next set of results. /// - resourceIdentifier: Filters the response to include only snapshots of the specified resource. - /// - resourceSnapshotTemplateIdentifier: Filters the response to include only snapshots created using the specified template. + /// - resourceSnapshotTemplateIdentifier: Filters the response to include only snapshots created using the specified template. /// - resourceType: Filters the response to include only snapshots of the specified resource type. /// - logger: Logger use during operation @inlinable @@ -1199,6 +1202,35 @@ public struct PartnerCentralSelling: AWSService { return try await self.listSolutions(input, logger: logger) } + /// Returns a list of tags for a resource. + @Sendable + @inlinable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + try await self.client.execute( + operation: "ListTagsForResource", + path: "/ListTagsForResource", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a list of tags for a resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource for which you want to retrieve tags. + /// - logger: Logger use during operation + @inlinable + public func listTagsForResource( + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListTagsForResourceResponse { + let input = ListTagsForResourceRequest( + resourceArn: resourceArn + ) + return try await self.listTagsForResource(input, logger: logger) + } + /// Updates the currently set system settings, which include the IAM Role used for resource snapshot jobs. @Sendable @inlinable @@ -1285,18 +1317,21 @@ public struct PartnerCentralSelling: AWSService { /// - catalog: Specifies the catalog related to the task. Use AWS for production engagements and Sandbox for testing scenarios. /// - clientToken: A unique, case-sensitive identifier provided by the client that helps to ensure the idempotency of the request. This can be a random or meaningful string but must be unique for each request. /// - identifier: Specifies the unique identifier of the EngagementInvitation to be accepted. Providing the correct identifier helps ensure that the correct engagement is processed. + /// - tags: A list of objects specifying each tag name and value. /// - logger: Logger use during operation @inlinable public func startEngagementByAcceptingInvitationTask( catalog: String, clientToken: String = StartEngagementByAcceptingInvitationTaskRequest.idempotencyToken(), identifier: String, + tags: [Tag]? 
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> StartEngagementByAcceptingInvitationTaskResponse { let input = StartEngagementByAcceptingInvitationTaskRequest( catalog: catalog, clientToken: clientToken, - identifier: identifier + identifier: identifier, + tags: tags ) return try await self.startEngagementByAcceptingInvitationTask(input, logger: logger) } @@ -1321,6 +1356,7 @@ public struct PartnerCentralSelling: AWSService { /// - catalog: Specifies the catalog in which the engagement is tracked. Acceptable values include AWS for production and Sandbox for testing environments. /// - clientToken: A unique token provided by the client to help ensure the idempotency of the request. It helps prevent the same task from being performed multiple times. /// - identifier: The unique identifier of the opportunity from which the engagement task is to be initiated. This helps ensure that the task is applied to the correct opportunity. + /// - tags: A list of objects specifying each tag name and value. /// - logger: Logger use during operation @inlinable public func startEngagementFromOpportunityTask( @@ -1328,18 +1364,20 @@ public struct PartnerCentralSelling: AWSService { catalog: String, clientToken: String = StartEngagementFromOpportunityTaskRequest.idempotencyToken(), identifier: String, + tags: [Tag]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> StartEngagementFromOpportunityTaskResponse { let input = StartEngagementFromOpportunityTaskRequest( awsSubmission: awsSubmission, catalog: catalog, clientToken: clientToken, - identifier: identifier + identifier: identifier, + tags: tags ) return try await self.startEngagementFromOpportunityTask(input, logger: logger) } - /// Starts a resource snapshot job that has been previously created. + /// Starts a resource snapshot job that has been previously created. @Sendable @inlinable public func startResourceSnapshotJob(_ input: StartResourceSnapshotJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1352,11 +1390,11 @@ public struct PartnerCentralSelling: AWSService { logger: logger ) } - /// Starts a resource snapshot job that has been previously created. + /// Starts a resource snapshot job that has been previously created. /// /// Parameters: - /// - catalog: Specifies the catalog related to the request. - /// - resourceSnapshotJobIdentifier: The identifier of the resource snapshot job to start. + /// - catalog: Specifies the catalog related to the request. Valid values are: AWS: Starts the request from the production AWS environment. Sandbox: Starts the request from a sandbox environment used for testing or development purposes. + /// - resourceSnapshotJobIdentifier: The identifier of the resource snapshot job to start. /// - logger: Logger use during operation @inlinable public func startResourceSnapshotJob( @@ -1371,7 +1409,7 @@ public struct PartnerCentralSelling: AWSService { return try await self.startResourceSnapshotJob(input, logger: logger) } - /// Stops a resource snapshot job. The job must be started prior to being stopped. + /// Stops a resource snapshot job. The job must be started prior to being stopped. @Sendable @inlinable public func stopResourceSnapshotJob(_ input: StopResourceSnapshotJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1384,11 +1422,11 @@ public struct PartnerCentralSelling: AWSService { logger: logger ) } - /// Stops a resource snapshot job. The job must be started prior to being stopped. + /// Stops a resource snapshot job. 
The job must be started prior to being stopped. /// /// Parameters: - /// - catalog: Specifies the catalog related to the request. - /// - resourceSnapshotJobIdentifier: The identifier of the job to stop. + /// - catalog: Specifies the catalog related to the request. Valid values are: AWS: Stops the request from the production AWS environment. Sandbox: Stops the request from a sandbox environment used for testing or development purposes. + /// - resourceSnapshotJobIdentifier: The identifier of the job to stop. /// - logger: Logger use during operation @inlinable public func stopResourceSnapshotJob( @@ -1403,7 +1441,7 @@ public struct PartnerCentralSelling: AWSService { return try await self.stopResourceSnapshotJob(input, logger: logger) } - /// Use this action to submit an opportunity that was previously created by partner for AWS review. After you perform this action, the opportunity becomes non-editable until it is reviewed by AWS and has LifeCycle.ReviewStatus as either Approved or Action Required. + /// Use this action to submit an Opportunity that was previously created by the partner for AWS review. After you perform this action, the Opportunity becomes non-editable until it is reviewed by AWS and has LifeCycle.ReviewStatus as either Approved or Action Required. @Sendable @inlinable public func submitOpportunity(_ input: SubmitOpportunityRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1416,13 +1454,13 @@ public struct PartnerCentralSelling: AWSService { logger: logger ) } - /// Use this action to submit an opportunity that was previously created by partner for AWS review. After you perform this action, the opportunity becomes non-editable until it is reviewed by AWS and has LifeCycle.ReviewStatus as either Approved or Action Required. + /// Use this action to submit an Opportunity that was previously created by the partner for AWS review. After you perform this action, the Opportunity becomes non-editable until it is reviewed by AWS and has LifeCycle.ReviewStatus as either Approved or Action Required. /// /// Parameters: - /// - catalog: Specifies the catalog related to the request. - /// - identifier: The identifier of the opportunity previously created by partner and needs to be submitted. - /// - involvementType: Specifies the level of AWS sellers' involvement on the opportunity. - /// - visibility: Determines whether to restrict visibility of the opportunity from AWS sales. Default value is Full. + /// - catalog: Specifies the catalog related to the request. Valid values are: AWS: Submits the opportunity request from the production AWS environment. Sandbox: Submits the opportunity request from a sandbox environment used for testing or development purposes. + /// - identifier: The identifier of the Opportunity that the partner previously created and needs to submit. + /// - involvementType: Specifies the level of AWS sellers' involvement on the opportunity. Valid values: Co-sell: Indicates the user wants to co-sell with AWS. Share the opportunity with AWS to receive deal assistance and support. For Visibility Only: Indicates that the user does not need support from AWS Sales Rep. Share this opportunity with AWS for visibility only; you will not receive deal assistance and support. + /// - visibility: Determines whether to restrict visibility of the opportunity from AWS sales. Default value is Full. Valid values: Full: The opportunity is fully visible to AWS sales. Limited: The opportunity has restricted visibility to AWS sales. 
/// - logger: Logger use during operation @inlinable public func submitOpportunity( @@ -1441,6 +1479,70 @@ public struct PartnerCentralSelling: AWSService { return try await self.submitOpportunity(input, logger: logger) } + /// Assigns one or more tags (key-value pairs) to the specified resource. + @Sendable + @inlinable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { + try await self.client.execute( + operation: "TagResource", + path: "/TagResource", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Assigns one or more tags (key-value pairs) to the specified resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource that you want to tag. + /// - tags: A map of the key-value pairs of the tag or tags to assign to the resource. + /// - logger: Logger use during operation + @inlinable + public func tagResource( + resourceArn: String, + tags: [Tag], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> TagResourceResponse { + let input = TagResourceRequest( + resourceArn: resourceArn, + tags: tags + ) + return try await self.tagResource(input, logger: logger) + } + + /// Removes a tag or tags from a resource. + @Sendable + @inlinable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { + try await self.client.execute( + operation: "UntagResource", + path: "/UntagResource", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Removes a tag or tags from a resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource that you want to untag. + /// - tagKeys: The keys of the key-value pairs for the tag or tags you want to remove from the specified resource. + /// - logger: Logger use during operation + @inlinable + public func untagResource( + resourceArn: String, + tagKeys: [String], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UntagResourceResponse { + let input = UntagResourceRequest( + resourceArn: resourceArn, + tagKeys: tagKeys + ) + return try await self.untagResource(input, logger: logger) + } + /// Updates the Opportunity record identified by a given Identifier. This operation allows you to modify the details of an existing opportunity to reflect the latest information and progress. Use this action to keep the opportunity record up-to-date and accurate. When you perform updates, include the entire payload with each request. If any field is omitted, the API assumes that the field is set to null. The best practice is to always perform a GetOpportunity to retrieve the latest values, then send the complete payload with the updated values to be changed. @Sendable @inlinable @@ -1466,7 +1568,7 @@ public struct PartnerCentralSelling: AWSService { /// - nationalSecurity: Specifies if the opportunity is associated with national security concerns. This flag is only applicable when the industry is Government. For national-security-related opportunities, validation and compliance rules may apply, impacting the opportunity's visibility and processing. /// - opportunityType: Specifies the opportunity type as a renewal, new, or expansion. Opportunity types: New opportunity: Represents a new business opportunity with a potential customer that's not previously engaged with your solutions or services. 
Renewal opportunity: Represents an opportunity to renew an existing contract or subscription with a current customer, ensuring continuity of service. Expansion opportunity: Represents an opportunity to expand the scope of an existing contract or subscription, either by adding new services or increasing the volume of existing services for a current customer. /// - partnerOpportunityIdentifier: Specifies the opportunity's unique identifier in the partner's CRM system. This value is essential to track and reconcile because it's included in the outbound payload sent back to the partner. - /// - primaryNeedsFromAws: Identifies the type of support the partner needs from Amazon Web Services. Valid values: Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks. Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation. Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution. Cosell—Pricing Assistance: Connect with an AWS seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals). Cosell—Technical Consultation: Connection with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution. Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment. Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning). Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs RFx support from Amazon Web Services. Do Not Need Support from AWS Sales Rep: Indicates that a partner doesn't need support from an Amazon Web Services Sales representative. The opportunity is managed solely by the partner. It's possible to request coselling support on these opportunities at any stage during their lifecycle. Also known as, for-visibility-only (FVO) opportunity. + /// - primaryNeedsFromAws: Identifies the type of support the partner needs from Amazon Web Services. Valid values: Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks. Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation. Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution. Cosell—Pricing Assistance: Connect with an AWS seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals). Cosell—Technical Consultation: Connection with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution. Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment. 
Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning). Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs RFx support from Amazon Web Services. /// - project: An object that contains project details summary for the Opportunity. /// - softwareRevenue: Specifies details of a customer's procurement terms. Required only for partners in eligible programs. /// - logger: Logger use during operation @@ -1538,7 +1640,7 @@ extension PartnerCentralSelling { /// Return PaginatorSequence for operation ``listEngagementByAcceptingInvitationTasks(_:logger:)``. /// /// - Parameters: - /// - catalog: Specifies the catalog related to the request. Valid values are: + /// - catalog: Specifies the catalog related to the request. Valid values are: AWS: Retrieves the request from the production AWS environment. Sandbox: Retrieves the request from a sandbox environment used for testing or development purposes. /// - engagementInvitationIdentifier: Filters tasks by the identifiers of the engagement invitations they are processing. /// - maxResults: Use this parameter to control the number of items returned in each request, which can be useful for performance tuning and managing large result sets. /// - opportunityIdentifier: Filters tasks by the identifiers of the opportunities they created or are associated with. @@ -1590,7 +1692,7 @@ extension PartnerCentralSelling { /// Return PaginatorSequence for operation ``listEngagementFromOpportunityTasks(_:logger:)``. /// /// - Parameters: - /// - catalog: Specifies the catalog related to the request. Valid values are: + /// - catalog: Specifies the catalog related to the request. Valid values are: AWS: Retrieves the request from the production AWS environment. Sandbox: Retrieves the request from a sandbox environment used for testing or development purposes. /// - engagementIdentifier: Filters tasks by the identifiers of the engagements they created or are associated with. /// - maxResults: Specifies the maximum number of results to return in a single page of the response. Use this parameter to control the number of items returned in each request, which can be useful for performance tuning and managing large result sets. /// - opportunityIdentifier: The identifier of the original opportunity associated with this task. @@ -1697,9 +1799,9 @@ extension PartnerCentralSelling { /// Return PaginatorSequence for operation ``listEngagementMembers(_:logger:)``. /// /// - Parameters: - /// - catalog: The catalog related to the request. - /// - identifier: Identifier of the engagement record to retrieve members from. - /// - maxResults: The maximum number of results to return in a single call. + /// - catalog: The catalog related to the request. + /// - identifier: Identifier of the Engagement record to retrieve members from. + /// - maxResults: The maximum number of results to return in a single call. /// - logger: Logger used for logging @inlinable public func listEngagementMembersPaginator( @@ -1737,11 +1839,11 @@ extension PartnerCentralSelling { /// Return PaginatorSequence for operation ``listEngagementResourceAssociations(_:logger:)``. /// /// - Parameters: - /// - catalog: Specifies the catalog in which to search for engagement-resource associations. - /// - createdBy: Filters the results to include only associations with resources owned by the specified AWS account. 
Use this when you want to find associations related to resources owned by a particular account. - /// - engagementIdentifier: Filters the results to include only associations related to the specified engagement. Use this when you want to find all resources associated with a specific engagement. - /// - maxResults: Limits the number of results returned in a single call. Use this to control the number of results returned, especially useful for pagination. - /// - resourceIdentifier: Filters the results to include only associations with the specified resource. Varies depending on the resource type. Use this when you want to find all engagements associated with a specific resource. + /// - catalog: Specifies the catalog in which to search for engagement-resource associations. Valid Values: "AWS" or "Sandbox". AWS for production environments. Sandbox for testing and development purposes. + /// - createdBy: Filters the response to include only snapshots of resources owned by the specified AWS account ID. Use this when you want to find associations related to resources owned by a particular account. + /// - engagementIdentifier: Filters the results to include only associations related to the specified engagement. Use this when you want to find all resources associated with a specific engagement. + /// - maxResults: Limits the number of results returned in a single call. Use this to control the number of results returned, especially useful for pagination. + /// - resourceIdentifier: Filters the results to include only associations with the specified resource. Varies depending on the resource type. Use this when you want to find all engagements associated with a specific resource. /// - resourceType: Filters the results to include only associations with resources of the specified type. /// - logger: Logger used for logging @inlinable @@ -1788,9 +1890,9 @@ extension PartnerCentralSelling { /// - Parameters: /// - catalog: Specifies the catalog related to the request. /// - createdBy: A list of AWS account IDs. When specified, the response includes engagements created by these accounts. This filter is useful for finding engagements created by specific team members. - /// - engagementIdentifier: An array of strings representing engagement identifiers to retrieve. - /// - excludeCreatedBy: An array of strings representing AWS Account IDs. Use this to exclude engagements created by specific users. - /// - maxResults: The maximum number of results to return in a single call. + /// - engagementIdentifier: An array of strings representing engagement identifiers to retrieve. + /// - excludeCreatedBy: An array of strings representing AWS Account IDs. Use this to exclude engagements created by specific users. + /// - maxResults: The maximum number of results to return in a single call. /// - sort: An object that specifies the sort order of the results. /// - logger: Logger used for logging @inlinable @@ -1937,11 +2039,11 @@ extension PartnerCentralSelling { /// /// - Parameters: /// - catalog: Specifies the catalog related to the request. - /// - createdBy: Filters the response to include only snapshots of resources created by the specified AWS account. + /// - createdBy: Filters the response to include only snapshots of resources owned by the specified AWS account. /// - engagementIdentifier: The unique identifier of the engagement associated with the snapshots. /// - maxResults: The maximum number of results to return in a single call. 
/// - resourceIdentifier: Filters the response to include only snapshots of the specified resource. - /// - resourceSnapshotTemplateIdentifier: Filters the response to include only snapshots created using the specified template. + /// - resourceSnapshotTemplateIdentifier: Filters the response to include only snapshots created using the specified template. /// - resourceType: Filters the response to include only snapshots of the specified resource type. /// - logger: Logger used for logging @inlinable diff --git a/Sources/Soto/Services/PartnerCentralSelling/PartnerCentralSelling_shapes.swift b/Sources/Soto/Services/PartnerCentralSelling/PartnerCentralSelling_shapes.swift index 2a8936be28..483c605896 100644 --- a/Sources/Soto/Services/PartnerCentralSelling/PartnerCentralSelling_shapes.swift +++ b/Sources/Soto/Services/PartnerCentralSelling/PartnerCentralSelling_shapes.swift @@ -891,9 +891,9 @@ extension PartnerCentralSelling { // MARK: Shapes public struct AcceptEngagementInvitationRequest: AWSEncodableShape { - /// The CatalogType parameter specifies the catalog associated with the engagement invitation. Accepted values are AWS and Sandbox, which determine the environment in which the engagement invitation is managed. + /// The CatalogType parameter specifies the catalog associated with the engagement invitation. Accepted values are AWS and Sandbox, which determine the environment in which the engagement invitation is managed. public let catalog: String - /// The Identifier parameter in the AcceptEngagementInvitationRequest specifies the unique identifier of the EngagementInvitation to be accepted. Providing the correct identifier ensures that the intended invitation is accepted. + /// The Identifier parameter in the AcceptEngagementInvitationRequest specifies the unique identifier of the EngagementInvitation to be accepted. Providing the correct identifier ensures that the intended invitation is accepted. public let identifier: String @inlinable @@ -1343,16 +1343,13 @@ extension PartnerCentralSelling { } public struct CreateEngagementInvitationRequest: AWSEncodableShape { - /// Specifies the catalog related to the engagement. Accepted values are AWS and Sandbox, which determine the environment in which the engagement is managed. + /// Specifies the catalog related to the engagement. Accepted values are AWS and Sandbox, which determine the environment in which the engagement is managed. public let catalog: String - /// Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. This token helps prevent duplicate invitation creations. + /// Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. This token helps prevent duplicate invitation creations. public let clientToken: String - /// The unique identifier of the Engagement associated with the invitation. This parameter ensures the invitation is created within the correct Engagement context. + /// The unique identifier of the Engagement associated with the invitation. This parameter ensures the invitation is created within the correct Engagement context. public let engagementIdentifier: String - /// The Invitation object all information necessary to initiate an engagement invitation to a partner. - /// It contains a personalized message from the sender, the invitation's receiver, and a payload. 
The Payload can - /// be the OpportunityInvitation, which includes detailed structures for sender contacts, partner responsibilities, customer - /// information, and project details. + /// The Invitation object all information necessary to initiate an engagement invitation to a partner. It contains a personalized message from the sender, the invitation's receiver, and a payload. The Payload can be the OpportunityInvitation, which includes detailed structures for sender contacts, partner responsibilities, customer information, and project details. public let invitation: Invitation @inlinable @@ -1397,11 +1394,11 @@ extension PartnerCentralSelling { } public struct CreateEngagementRequest: AWSEncodableShape { - /// The CreateEngagementRequest$Catalog parameter specifies the catalog related to the engagement. Accepted values are AWS and Sandbox, which determine the environment in which the engagement is managed. + /// The CreateEngagementRequest$Catalog parameter specifies the catalog related to the engagement. Accepted values are AWS and Sandbox, which determine the environment in which the engagement is managed. public let catalog: String - /// The CreateEngagementRequest$ClientToken parameter specifies a unique, case-sensitive identifier to ensure that the request is handled exactly once. The value must not exceed sixty-four alphanumeric characters. + /// The CreateEngagementRequest$ClientToken parameter specifies a unique, case-sensitive identifier to ensure that the request is handled exactly once. The value must not exceed sixty-four alphanumeric characters. public let clientToken: String - /// The Contexts field is a required array of objects, with a maximum of 5 contexts allowed, specifying detailed information about customer projects associated with the Engagement. Each context object contains a Type field indicating the context type, which must be CustomerProject in this version, and a Payload field containing the CustomerProject details. The CustomerProject object is composed of two main components: Customer and Project. The Customer object includes information such as CompanyName, WebsiteUrl, Industry, and CountryCode, providing essential details about the customer. The Project object contains Title, BusinessProblem, and TargetCompletionDate, offering insights into the specific project associated with the customer. This structure allows comprehensive context to be included within the Engagement, facilitating effective collaboration between parties by providing relevant customer and project information. + /// The Contexts field is a required array of objects, with a maximum of 5 contexts allowed, specifying detailed information about customer projects associated with the Engagement. Each context object contains a Type field indicating the context type, which must be CustomerProject in this version, and a Payload field containing the CustomerProject details. The CustomerProject object is composed of two main components: Customer and Project. The Customer object includes information such as CompanyName, WebsiteUrl, Industry, and CountryCode, providing essential details about the customer. The Project object contains Title, BusinessProblem, and TargetCompletionDate, offering insights into the specific project associated with the customer. This structure allows comprehensive context to be included within the Engagement, facilitating effective collaboration between parties by providing relevant customer and project information. public let contexts: [EngagementContextDetails]? 
/// Provides a description of the Engagement. public let description: String @@ -1476,7 +1473,7 @@ extension PartnerCentralSelling { public let origin: OpportunityOrigin? /// Specifies the opportunity's unique identifier in the partner's CRM system. This value is essential to track and reconcile because it's included in the outbound payload to the partner. This field allows partners to link an opportunity to their CRM, which helps to ensure seamless integration and accurate synchronization between the Partner Central API and the partner's internal systems. public let partnerOpportunityIdentifier: String? - /// Identifies the type of support the partner needs from Amazon Web Services. Valid values: Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks. Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation. Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution. Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals). Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution. Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment. Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning). Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs Amazon Web Services RFx support. Do Not Need Support from AWS Sales Rep: Indicates that a partner doesn't need support from an Amazon Web Services sales representative, and the partner solely manages the opportunity. It's possible to request coselling support on these opportunities at any stage during their lifecycles. This is also known as a for-visibility-only (FVO) opportunity. + /// Identifies the type of support the partner needs from Amazon Web Services. Valid values: Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks. Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation. Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution. Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals). Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution. Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment. 
Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning). Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs Amazon Web Services RFx support. public let primaryNeedsFromAws: [PrimaryNeedFromAws]? /// An object that contains project details for the Opportunity. public let project: Project? @@ -1553,27 +1550,30 @@ extension PartnerCentralSelling { } public struct CreateResourceSnapshotJobRequest: AWSEncodableShape { - /// Specifies the catalog in which to create the snapshot job. Valid values are AWS and Sandbox. + /// Specifies the catalog in which to create the snapshot job. Valid values are AWS and Sandbox. public let catalog: String - /// Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. This token helps prevent duplicate snapshot job creations. + /// A client-generated UUID used for the idempotency check. The token helps prevent duplicate job creations. public let clientToken: String - /// Specifies the identifier of the engagement associated with the resource to be snapshotted. + /// Specifies the identifier of the engagement associated with the resource to be snapshotted. public let engagementIdentifier: String - /// Specifies the identifier of the specific resource to be snapshotted. The format depends on the ResourceType. + /// Specifies the identifier of the specific resource to be snapshotted. The format depends on the ResourceType. public let resourceIdentifier: String - /// Specifies the name of the template that defines the schema for the snapshot. + /// Specifies the name of the template that defines the schema for the snapshot. public let resourceSnapshotTemplateIdentifier: String - /// The type of resource for which the snapshot job is being created. Must be one of the supported resource types Opportunity. + /// The type of resource for which the snapshot job is being created. Must be one of the supported resource types, i.e., Opportunity. public let resourceType: ResourceType + /// A list of objects specifying each tag name and value. + public let tags: [Tag]? @inlinable - public init(catalog: String, clientToken: String = CreateResourceSnapshotJobRequest.idempotencyToken(), engagementIdentifier: String, resourceIdentifier: String, resourceSnapshotTemplateIdentifier: String, resourceType: ResourceType) { + public init(catalog: String, clientToken: String = CreateResourceSnapshotJobRequest.idempotencyToken(), engagementIdentifier: String, resourceIdentifier: String, resourceSnapshotTemplateIdentifier: String, resourceType: ResourceType, tags: [Tag]? 
= nil) { self.catalog = catalog self.clientToken = clientToken self.engagementIdentifier = engagementIdentifier self.resourceIdentifier = resourceIdentifier self.resourceSnapshotTemplateIdentifier = resourceSnapshotTemplateIdentifier self.resourceType = resourceType + self.tags = tags } public func validate(name: String) throws { @@ -1582,6 +1582,11 @@ extension PartnerCentralSelling { try self.validate(self.engagementIdentifier, name: "engagementIdentifier", parent: name, pattern: "^eng-[0-9a-z]{14}$") try self.validate(self.resourceIdentifier, name: "resourceIdentifier", parent: name, pattern: "^O[0-9]{1,19}$") try self.validate(self.resourceSnapshotTemplateIdentifier, name: "resourceSnapshotTemplateIdentifier", parent: name, pattern: "^[a-zA-Z0-9]{3,80}$") + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.tags, name: "tags", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { @@ -1591,13 +1596,14 @@ extension PartnerCentralSelling { case resourceIdentifier = "ResourceIdentifier" case resourceSnapshotTemplateIdentifier = "ResourceSnapshotTemplateIdentifier" case resourceType = "ResourceType" + case tags = "Tags" } } public struct CreateResourceSnapshotJobResponse: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the created snapshot job. + /// The Amazon Resource Name (ARN) of the created snapshot job. public let arn: String? - /// The unique identifier for the created snapshot job. + /// The unique identifier for the created snapshot job. public let id: String? @inlinable @@ -1615,7 +1621,7 @@ extension PartnerCentralSelling { public struct CreateResourceSnapshotRequest: AWSEncodableShape { /// Specifies the catalog where the snapshot is created. Valid values are AWS and Sandbox. public let catalog: String - /// Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. This token helps prevent duplicate snapshot creations. + /// Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. This token helps prevent duplicate snapshot creations. public let clientToken: String /// The unique identifier of the engagement associated with this snapshot. This field links the snapshot to a specific engagement context. public let engagementIdentifier: String @@ -1699,7 +1705,7 @@ extension PartnerCentralSelling { public struct CustomerProjectsContext: AWSEncodableShape & AWSDecodableShape { public let customer: EngagementCustomer? - /// Information about the customer project associated with the Engagement. + /// Information about the customer project associated with the Engagement. public let project: EngagementCustomerProjectDetails? @inlinable @@ -1788,9 +1794,9 @@ extension PartnerCentralSelling { } public struct EngagementContextDetails: AWSEncodableShape & AWSDecodableShape { - /// Contains the specific details of the Engagement context. The structure of this payload varies depending on the Type field. + /// Contains the specific details of the Engagement context. The structure of this payload varies depending on the Type field. public let payload: EngagementContextPayload? - /// Specifies the type of Engagement context. Valid values are "CustomerProject" or "Document", indicating whether the context relates to a customer project or a document respectively. + /// Specifies the type of Engagement context. 
Valid values are "CustomerProject" or "Document", indicating whether the context relates to a customer project or a document respectively. public let type: EngagementContextType @inlinable @@ -1845,11 +1851,11 @@ extension PartnerCentralSelling { } public struct EngagementCustomerProjectDetails: AWSEncodableShape & AWSDecodableShape { - /// A description of the business problem the project aims to solve. + /// A description of the business problem the project aims to solve. public let businessProblem: String - /// The target completion date for the customer's project. + /// The target completion date for the customer's project. public let targetCompletionDate: String - /// The title of the project. + /// The title of the project. public let title: String @inlinable @@ -1938,11 +1944,11 @@ extension PartnerCentralSelling { } public struct EngagementMember: AWSDecodableShape { - /// This is the unique identifier for the AWS account associated with the member organization. It's used for AWS-related operations and identity verification. + /// This is the unique identifier for the AWS account associated with the member organization. It's used for AWS-related operations and identity verification. public let accountId: String? - /// The official name of the member's company or organization. + /// The official name of the member's company or organization. public let companyName: String? - /// The URL of the member company's website. This offers a way to find more information about the member organization and serves as an additional identifier. + /// The URL of the member company's website. This offers a way to find more information about the member organization and serves as an additional identifier. public let websiteUrl: String? @inlinable @@ -1960,9 +1966,9 @@ extension PartnerCentralSelling { } public struct EngagementMemberSummary: AWSDecodableShape { - /// The official name of the member's company or organization. + /// The official name of the member's company or organization. public let companyName: String? - /// The URL of the member company's website. This offers a way to find more information about the member organization and serves as an additional identifier. + /// The URL of the member company's website. This offers a way to find more information about the member organization and serves as an additional identifier. public let websiteUrl: String? @inlinable @@ -1980,7 +1986,7 @@ extension PartnerCentralSelling { public struct EngagementResourceAssociationSummary: AWSDecodableShape { /// Indicates the environment in which the resource and engagement exist. public let catalog: String - /// The AWS account ID of the entity that created the association. + /// The AWS account ID of the entity that owns the resource. Identifies the account responsible for or having primary control over the resource. public let createdBy: String? /// A unique identifier for the engagement associated with the resource. public let engagementId: String? @@ -2008,9 +2014,9 @@ extension PartnerCentralSelling { } public struct EngagementSort: AWSEncodableShape { - /// The field by which to sort the results. + /// The field by which to sort the results. public let sortBy: EngagementSortName - /// The order in which to sort the results. + /// The order in which to sort the results. public let sortOrder: SortOrder @inlinable @@ -2026,18 +2032,18 @@ extension PartnerCentralSelling { } public struct EngagementSummary: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the created engagement. 
+ /// The Amazon Resource Name (ARN) of the created Engagement. public let arn: String? - /// The date and time when the engagement was created. + /// The date and time when the Engagement was created. @OptionalCustomCoding public var createdAt: Date? - /// The AWS account ID of the engagement creator. + /// The AWS Account ID of the Engagement creator. public let createdBy: String? - /// The unique identifier for the engagement. + /// The unique identifier for the Engagement. public let id: String? - /// The number of members in the engagement. + /// The number of members in the Engagement. public let memberCount: Int? - /// The title of the engagement. + /// The title of the Engagement. public let title: String? @inlinable @@ -2065,7 +2071,7 @@ extension PartnerCentralSelling { public let amount: String /// Indicates the currency in which the revenue estimate is provided. This helps in understanding the financial impact across different markets. public let currencyCode: CurrencyCode - /// A URL providing additional information or context about the spend estimation. + /// A URL providing additional information or context about the spend estimation. public let estimationUrl: String? /// Indicates how frequently the customer is expected to spend the projected amount. This can include values such as Monthly, Quarterly, or Annually. The default value is Monthly, representing recurring monthly spend. public let frequency: PaymentFrequency @@ -2206,14 +2212,13 @@ extension PartnerCentralSelling { public let arn: String? /// Indicates the catalog from which the engagement invitation details are retrieved. This field helps in identifying the appropriate catalog (e.g., AWS or Sandbox) used in the request. public let catalog: String - /// The description of the engagement associated with this invitation. + /// The description of the engagement associated with this invitation. public let engagementDescription: String? - /// The identifier of the engagement associated with this invitation.This ID links the invitation to its corresponding engagement. + /// The identifier of the engagement associated with this invitation. This ID links the invitation to its corresponding engagement. public let engagementId: String? /// The title of the engagement invitation, summarizing the purpose or objectives of the opportunity shared by AWS. public let engagementTitle: String? - /// A list of active members currently part of the Engagement. This array contains a maximum of 10 members, each represented by an object with the following properties. - /// CompanyName: The name of the member's company. WebsiteUrl: The website URL of the member's company. + /// A list of active members currently part of the Engagement. This array contains a maximum of 10 members, each represented by an object with the following properties. CompanyName: The name of the member's company. WebsiteUrl: The website URL of the member's company. public let existingMembers: [EngagementMemberSummary]? /// Indicates the date on which the engagement invitation will expire if not accepted by the partner. @OptionalCustomCoding public var expirationDate: Date? /// The date when the engagement invitation was sent to the partner. @OptionalCustomCoding public var invitationDate: Date? - /// The message sent to the invited partner when the invitation was created. + /// The message sent to the invited partner when the invitation was created. public let invitationMessage: String? 
/// Details of the engagement invitation payload, including specific data relevant to the invitation's contents, such as customer information and opportunity insights. public let payload: Payload? @@ -2283,9 +2288,9 @@ extension PartnerCentralSelling { } public struct GetEngagementRequest: AWSEncodableShape { - /// Specifies the catalog related to the engagement request. Valid values are AWS and Sandbox. + /// Specifies the catalog related to the engagement request. Valid values are AWS and Sandbox. public let catalog: String - /// Specifies the identifier of the Engagement record to retrieve. + /// Specifies the identifier of the Engagement record to retrieve. public let identifier: String @inlinable @@ -2306,22 +2311,22 @@ extension PartnerCentralSelling { } public struct GetEngagementResponse: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the engagement retrieved. + /// The Amazon Resource Name (ARN) of the engagement retrieved. public let arn: String? - /// A list of context objects associated with the engagement. Each context provides additional information related to the Engagement, such as customer projects or documents. + /// A list of context objects associated with the engagement. Each context provides additional information related to the Engagement, such as customer projects or documents. public let contexts: [EngagementContextDetails]? - /// The date and time when the Engagement was created, presented in ISO 8601 format (UTC). For example: "2023-05-01T20:37:46Z". This timestamp helps track the lifecycle of the Engagement. + /// The date and time when the Engagement was created, presented in ISO 8601 format (UTC). For example: "2023-05-01T20:37:46Z". This timestamp helps track the lifecycle of the Engagement. @OptionalCustomCoding public var createdAt: Date? - /// The AWS account ID of the user who originally created the engagement. This field helps in tracking the origin of the engagement. + /// The AWS account ID of the user who originally created the engagement. This field helps in tracking the origin of the engagement. public let createdBy: String? - /// A more detailed description of the engagement. This provides additional context or information about the engagement's purpose or scope. + /// A more detailed description of the engagement. This provides additional context or information about the engagement's purpose or scope. public let description: String? - /// The unique resource identifier of the engagement retrieved. + /// The unique resource identifier of the engagement retrieved. public let id: String? - /// Specifies the current count of members participating in the Engagement. This count includes all active members regardless of their roles or permissions within the Engagement. + /// Specifies the current count of members participating in the Engagement. This count includes all active members regardless of their roles or permissions within the Engagement. public let memberCount: Int? - /// The title of the engagement. It provides a brief, descriptive name for the engagement that is meaningful and easily recognizable. + /// The title of the engagement. It provides a brief, descriptive name for the engagement that is meaningful and easily recognizable. public let title: String? @inlinable @@ -2398,7 +2403,7 @@ extension PartnerCentralSelling { public let opportunityType: OpportunityType? /// Specifies the opportunity's unique identifier in the partner's CRM system. 
This value is essential to track and reconcile because it's included in the outbound payload sent back to the partner. public let partnerOpportunityIdentifier: String? - /// Identifies the type of support the partner needs from Amazon Web Services. Valid values: Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks. Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation. Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution. Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals). Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution. Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment. Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning). Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs Amazon Web Services RFx support. Do Not Need Support from Amazon Web Services Sales Rep: Indicates that a partner doesn't need support from an Amazon Web Services sales representative, and the partner solely manages the opportunity. It's possible to request coselling support on these opportunities at any stage during their lifecycle. Also known as, for-visibility-only (FVO) opportunity. + /// Identifies the type of support the partner needs from Amazon Web Services. Valid values: Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks. Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation. Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution. Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals). Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution. Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment. Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning). Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs Amazon Web Services RFx support. public let primaryNeedsFromAws: [PrimaryNeedFromAws]? /// An object that contains project details summary for the Opportunity. public let project: Project? 
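Taken together, the tagging additions in this diff (the ListTagsForResource, TagResource, and UntagResource operations, plus the new tags parameters on the snapshot-job and task-start requests) support a simple tag lifecycle. The following is a minimal sketch rather than a confirmed example: it assumes Soto's usual client setup, a Tag(key:value:) memberwise initializer, and a hypothetical ARN and region.

import SotoPartnerCentralSelling

let client = AWSClient()  // default credential and HTTP client setup; adjust for your environment
let selling = PartnerCentralSelling(client: client, region: .useast1)  // region is an assumption

// Hypothetical ARN; substitute a resource you own.
let arn = "arn:aws:partnercentral-selling:us-east-1:123456789012:engagement/eng-example"

// Attach a tag, read it back, then remove it. The validation shown above
// caps a single request at 200 tags.
_ = try await selling.tagResource(resourceArn: arn, tags: [PartnerCentralSelling.Tag(key: "Team", value: "Alliances")])
let tagList = try await selling.listTagsForResource(resourceArn: arn)
print(tagList)
_ = try await selling.untagResource(resourceArn: arn, tagKeys: ["Team"])

try await client.shutdown()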
@@ -2448,10 +2453,9 @@ extension PartnerCentralSelling { } public struct GetResourceSnapshotJobRequest: AWSEncodableShape { - /// Specifies the catalog related to the request. Valid values are: - /// AWS: Retrieves the snapshot job from the production AWS environment. Sandbox: Retrieves the snapshot job from a sandbox environment used for testing or development purposes. + /// Specifies the catalog related to the request. Valid values are: AWS: Retrieves the snapshot job from the production AWS environment. Sandbox: Retrieves the snapshot job from a sandbox environment used for testing or development purposes. public let catalog: String - /// The unique identifier of the resource snapshot job to be retrieved. This identifier is crucial for pinpointing the specific job you want to query. + /// The unique identifier of the resource snapshot job to be retrieved. This identifier is crucial for pinpointing the specific job you want to query. public let resourceSnapshotJobIdentifier: String @inlinable @@ -2472,32 +2476,31 @@ extension PartnerCentralSelling { } public struct GetResourceSnapshotJobResponse: AWSDecodableShape { - /// he Amazon Resource Name (ARN) of the snapshot job. This globally unique identifier can be used for resource-specific operations across AWS services. + /// The Amazon Resource Name (ARN) of the snapshot job. This globally unique identifier can be used for resource-specific operations across AWS services. public let arn: String? - /// The catalog in which the snapshot job was created. This will match the catalog specified in the request. + /// The catalog in which the snapshot job was created. This will match the Catalog specified in the request. public let catalog: String - /// The date and time when the snapshot job was created, in ISO 8601 format (UTC). Example: "2023-05-01T20:37:46Z" + /// The date and time when the snapshot job was created in ISO 8601 format (UTC). Example: "2023-05-01T20:37:46Z" @OptionalCustomCoding public var createdAt: Date? - /// The identifier of the engagement associated with this snapshot job. This links the job to a specific engagement context. + /// The identifier of the engagement associated with this snapshot job. This links the job to a specific engagement context. public let engagementId: String? - /// The unique identifier of the snapshot job. This matches the ResourceSnapshotJobIdentifier provided in the request. + /// The unique identifier of the snapshot job. This matches the ResourceSnapshotJobIdentifier provided in the request. public let id: String? - /// If the job has encountered any failures, this field contains the error message from the most recent failure. This can be useful for troubleshooting issues with the job. + /// If the job has encountered any failures, this field contains the error message from the most recent failure. This can be useful for troubleshooting issues with the job. public let lastFailure: String? - /// The date and time of the last successful execution of the job, in ISO 8601 format (UTC). Example: "2023-05-01T20:37:46Z" + /// The date and time of the last successful execution of the job, in ISO 8601 format (UTC). Example: "2023-05-01T20:37:46Z" @OptionalCustomCoding public var lastSuccessfulExecutionDate: Date? - /// The Amazon Resource Name (ARN) of the resource being snapshotted. This provides a globally unique identifier for the resource across AWS. + /// The Amazon Resource Name (ARN) of the resource being snapshotted. This provides a globally unique identifier for the resource across AWS. 
public let resourceArn: String? - /// The identifier of the specific resource being snapshotted. The format may vary depending on the ResourceType. + /// The identifier of the specific resource being snapshotted. The format might vary depending on the ResourceType. public let resourceId: String? - /// The name of the template used for creating the snapshot. This is the same as the template name. It defines the structure and content of the snapshot. + /// The name of the template used for creating the snapshot. This is the same as the template name. It defines the structure and content of the snapshot. public let resourceSnapshotTemplateName: String? - /// The type of resource being snapshotted. This would have Opportunity as a value as it is dependent on the supported resource type. + /// The type of resource being snapshotted. This would have "Opportunity" as a value as it is dependent on the supported resource type. public let resourceType: ResourceType? - /// The current status of the snapshot job. Valid values: - /// STOPPED: The job is not currently running. RUNNING: The job is actively executing. + /// The current status of the snapshot job. Valid values: STOPPED: The job is not currently running. RUNNING: The job is actively executing. public let status: ResourceSnapshotJobStatus? @inlinable @@ -2575,7 +2578,7 @@ extension PartnerCentralSelling { } public struct GetResourceSnapshotResponse: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the snapshot. This globally unique identifier can be used for resource-specific operations across AWS services. + /// The Amazon Resource Name (ARN) that uniquely identifies the resource snapshot. public let arn: String? /// The catalog in which the snapshot was created. Matches the Catalog specified in the request. public let catalog: String @@ -2587,13 +2590,13 @@ extension PartnerCentralSelling { /// The identifier of the engagement associated with this snapshot. Matches the EngagementIdentifier specified in the request. public let engagementId: String? public let payload: ResourceSnapshotPayload? - /// The identifier of the specific resource that was snapshotted. Matches the ResourceIdentifier specified in the request. + /// The identifier of the specific resource that was snapshotted. Matches the ResourceIdentifier specified in the request. public let resourceId: String? - /// The name of the view used for this snapshot. This is the same as the template name. + /// The name of the view used for this snapshot. This is the same as the template name. public let resourceSnapshotTemplateName: String? - /// The type of the resource that was snapshotted. Matches the ResourceType specified in the request. + /// The type of the resource that was snapshotted. Matches the ResourceType specified in the request. public let resourceType: ResourceType? - /// The revision number of this snapshot. This is a positive integer that is sequential and unique within the context of a resource view. + /// The revision number of this snapshot. This is a positive integer that is sequential and unique within the context of a resource view. public let revision: Int? @inlinable @@ -2865,8 +2868,7 @@ extension PartnerCentralSelling { } public struct ListEngagementByAcceptingInvitationTasksRequest: AWSEncodableShape { - /// Specifies the catalog related to the request. Valid values are: - /// AWS: Retrieves the request from the production AWS environment. Sandbox: Retrieves the request from a sandbox environment used for testing or development purposes. 
+ /// Specifies the catalog related to the request. Valid values are: AWS: Retrieves the request from the production AWS environment. Sandbox: Retrieves the request from a sandbox environment used for testing or development purposes. public let catalog: String /// Filters tasks by the identifiers of the engagement invitations they are processing. public let engagementInvitationIdentifier: [String]? @@ -2951,7 +2953,7 @@ extension PartnerCentralSelling { public struct ListEngagementFromOpportunityTaskSummary: AWSDecodableShape { /// The unique identifier of the engagement created as a result of the task. This field is populated when the task is completed successfully. public let engagementId: String? - /// The unique identifier of the engagement identifier created as a result of the task. This field is populated when the task is completed successfully. + /// The unique identifier of the Engagement Invitation. public let engagementInvitationId: String? /// A detailed message providing additional information about the task, especially useful in case of failures. This field may contain error details or other relevant information about the task's execution public let message: String? @@ -3000,8 +3002,7 @@ extension PartnerCentralSelling { } public struct ListEngagementFromOpportunityTasksRequest: AWSEncodableShape { - /// Specifies the catalog related to the request. Valid values are: - /// AWS: Retrieves the request from the production AWS environment. Sandbox: Retrieves the request from a sandbox environment used for testing or development purposes. + /// Specifies the catalog related to the request. Valid values are: AWS: Retrieves the request from the production AWS environment. Sandbox: Retrieves the request from a sandbox environment used for testing or development purposes. public let catalog: String /// Filters tasks by the identifiers of the engagements they created or are associated with. public let engagementIdentifier: [String]? @@ -3164,13 +3165,13 @@ extension PartnerCentralSelling { } public struct ListEngagementMembersRequest: AWSEncodableShape { - /// The catalog related to the request. + /// The catalog related to the request. public let catalog: String - /// Identifier of the engagement record to retrieve members from. + /// Identifier of the Engagement record to retrieve members from. public let identifier: String - /// The maximum number of results to return in a single call. + /// The maximum number of results to return in a single call. public let maxResults: Int? - /// The token for the next set of results. + /// The token for the next set of results. public let nextToken: String? @inlinable @@ -3197,9 +3198,9 @@ extension PartnerCentralSelling { } public struct ListEngagementMembersResponse: AWSDecodableShape { - /// Provides a list of engagement members. + /// Provides a list of engagement members. public let engagementMemberList: [EngagementMember] - /// A pagination token used to retrieve the next set of results. If there are more results available than can be returned in a single response, this token will be present. Use this token in a subsequent request to retrieve the next page of results. If there are no more results, this value will be null. + /// A pagination token used to retrieve the next set of results. If there are more results available than can be returned in a single response, this token will be present. Use this token in a subsequent request to retrieve the next page of results. If there are no more results, this value will be null. 
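The nextToken fields documented in these list shapes follow the usual cursor contract: echo the token from each response into the next request until it comes back nil. A minimal sketch of draining ListEngagementMembers this way, assuming the parameter-splatted convenience method Soto generates (the generated ...Paginator helpers are usually preferable in real code):

    // Collect every member of an Engagement, one page at a time.
    func allEngagementMembers(
        _ selling: PartnerCentralSelling,
        engagementId: String
    ) async throws -> [PartnerCentralSelling.EngagementMember] {
        var members: [PartnerCentralSelling.EngagementMember] = []
        var token: String? = nil
        repeat {
            let page = try await selling.listEngagementMembers(
                catalog: "AWS",
                identifier: engagementId,
                maxResults: 50,
                nextToken: token
            )
            members.append(contentsOf: page.engagementMemberList)
            token = page.nextToken // nil once the last page has been returned
        } while token != nil
        return members
    }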
public let nextToken: String? @inlinable @@ -3215,17 +3216,17 @@ extension PartnerCentralSelling { } public struct ListEngagementResourceAssociationsRequest: AWSEncodableShape { - /// Specifies the catalog in which to search for engagement-resource associations. + /// Specifies the catalog in which to search for engagement-resource associations. Valid Values: "AWS" or "Sandbox" AWS for production environments. Sandbox for testing and development purposes. public let catalog: String - /// Filters the results to include only associations with resources owned by the specified AWS account. Use this when you want to find associations related to resources owned by a particular account. + /// Filters the response to include only snapshots of resources owned by the specified AWS account ID. Use this when you want to find associations related to resources owned by a particular account. public let createdBy: String? - /// Filters the results to include only associations related to the specified engagement. Use this when you want to find all resources associated with a specific engagement. + /// Filters the results to include only associations related to the specified engagement. Use this when you want to find all resources associated with a specific engagement. public let engagementIdentifier: String? - /// Limits the number of results returned in a single call. Use this to control the number of results returned, especially useful for pagination. + /// Limits the number of results returned in a single call. Use this to control the number of results returned, especially useful for pagination. public let maxResults: Int? - /// A token used for pagination of results. Include this token in subsequent requests to retrieve the next set of results. + /// A token used for pagination of results. Include this token in subsequent requests to retrieve the next set of results. public let nextToken: String? - /// Filters the results to include only associations with the specified resource. Varies depending on the resource type. Use this when you want to find all engagements associated with a specific resource. + /// Filters the results to include only associations with the specified resource. Varies depending on the resource type. Use this when you want to find all engagements associated with a specific resource. public let resourceIdentifier: String? /// Filters the results to include only associations with resources of the specified type. public let resourceType: ResourceType? @@ -3284,13 +3285,13 @@ extension PartnerCentralSelling { public let catalog: String /// A list of AWS account IDs. When specified, the response includes engagements created by these accounts. This filter is useful for finding engagements created by specific team members. public let createdBy: [String]? - /// An array of strings representing engagement identifiers to retrieve. + /// An array of strings representing engagement identifiers to retrieve. public let engagementIdentifier: [String]? - /// An array of strings representing AWS Account IDs. Use this to exclude engagements created by specific users. + /// An array of strings representing AWS Account IDs. Use this to exclude engagements created by specific users. public let excludeCreatedBy: [String]? - /// The maximum number of results to return in a single call. + /// The maximum number of results to return in a single call. public let maxResults: Int? - /// The token for the next set of results. This value is returned from a previous call. 
+ /// The token for the next set of results. This value is returned from a previous call. public let nextToken: String? /// An object that specifies the sort order of the results. public let sort: EngagementSort? @@ -3339,9 +3340,9 @@ extension PartnerCentralSelling { } public struct ListEngagementsResponse: AWSDecodableShape { - /// An array of engagement summary objects. + /// An array of engagement summary objects. public let engagementSummaryList: [EngagementSummary] - /// The token to retrieve the next set of results. This field will be null if there are no more results. + /// The token to retrieve the next set of results. This field will be null if there are no more results. public let nextToken: String? @inlinable @@ -3491,7 +3492,7 @@ extension PartnerCentralSelling { public struct ListResourceSnapshotsRequest: AWSEncodableShape { /// Specifies the catalog related to the request. public let catalog: String - /// Filters the response to include only snapshots of resources created by the specified AWS account. + /// Filters the response to include only snapshots of resources owned by the specified AWS account. public let createdBy: String? /// The unique identifier of the engagement associated with the snapshots. public let engagementIdentifier: String @@ -3501,7 +3502,7 @@ extension PartnerCentralSelling { public let nextToken: String? /// Filters the response to include only snapshots of the specified resource. public let resourceIdentifier: String? - /// Filters the response to include only snapshots created using the specified template. + /// Filters the response to include only snapshots created using the specified template. public let resourceSnapshotTemplateIdentifier: String? /// Filters the response to include only snapshots of the specified resource type. public let resourceType: ResourceType? @@ -3623,6 +3624,40 @@ extension PartnerCentralSelling { } } + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource for which you want to retrieve tags. + public let resourceArn: String + + @inlinable + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1000) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[\\w+=/,.@-]+:partnercentral:[\\w+=/,.@-]*:[0-9]{12}:catalog/([a-zA-Z]+)/[\\w+=,.@-]+(/[\\w+=,.@-]+)*$") + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + } + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// A map of the key-value pairs for the tag or tags assigned to the specified resource. + public let tags: [Tag] + + @inlinable + public init(tags: [Tag]) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "Tags" + } + } + public struct ListTasksSortBase: AWSEncodableShape { /// Specifies the field by which the task list should be sorted. public let sortBy: ListTasksSortName @@ -4134,11 +4169,11 @@ extension PartnerCentralSelling { public struct ResourceSnapshotJobSummary: AWSDecodableShape { /// The Amazon Resource Name (ARN) for the resource snapshot job. public let arn: String? - /// The unique identifier for the engagement within the AWS Partner Central system. This ID is used for direct references to the engagement within the service. 
+ /// The unique identifier of the Engagement. public let engagementId: String? /// The unique identifier for the resource snapshot job within the AWS Partner Central system. This ID is used for direct references to the job within the service. public let id: String? - /// Represents the current status of the resource snapshot job. + /// The current status of the snapshot job. Valid values: STOPPED: The job is not currently running. RUNNING: The job is actively executing. public let status: ResourceSnapshotJobStatus? @inlinable @@ -4160,7 +4195,7 @@ extension PartnerCentralSelling { public struct ResourceSnapshotSummary: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the snapshot. This globally unique identifier can be used for cross-service references and in IAM policies. public let arn: String? - /// The AWS account ID of the principal (user or role) who created the snapshot. This helps in tracking the origin of the snapshot. + /// The AWS account ID of the entity that owns the resource from which the snapshot was created. public let createdBy: String? /// The identifier of the specific resource snapshotted. The format might vary depending on the ResourceType. public let resourceId: String? @@ -4168,7 +4203,7 @@ extension PartnerCentralSelling { public let resourceSnapshotTemplateName: String? /// The type of resource snapshotted. public let resourceType: ResourceType? - /// The revision number of the snapshot. This integer value is incremented each time the snapshot is updated, allowing for version tracking of the resource snapshot. + /// The revision number of the snapshot. This integer value is incremented each time the snapshot is updated, allowing for version tracking of the resource snapshot. public let revision: Int? @inlinable @@ -4344,12 +4379,15 @@ extension PartnerCentralSelling { public let clientToken: String /// Specifies the unique identifier of the EngagementInvitation to be accepted. Providing the correct identifier helps ensure that the correct engagement is processed. public let identifier: String + /// A list of objects specifying each tag name and value. + public let tags: [Tag]? @inlinable - public init(catalog: String, clientToken: String = StartEngagementByAcceptingInvitationTaskRequest.idempotencyToken(), identifier: String) { + public init(catalog: String, clientToken: String = StartEngagementByAcceptingInvitationTaskRequest.idempotencyToken(), identifier: String, tags: [Tag]? = nil) { self.catalog = catalog self.clientToken = clientToken self.identifier = identifier + self.tags = tags } public func validate(name: String) throws { @@ -4358,12 +4396,18 @@ extension PartnerCentralSelling { try self.validate(self.identifier, name: "identifier", parent: name, max: 255) try self.validate(self.identifier, name: "identifier", parent: name, min: 1) try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^(arn:.*|engi-[0-9a-z]{13})$") + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.tags, name: "tags", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { case catalog = "Catalog" case clientToken = "ClientToken" case identifier = "Identifier" + case tags = "Tags" } } @@ -4376,7 +4420,7 @@ extension PartnerCentralSelling { public let opportunityId: String? /// Indicates the reason for task failure using an enumerated code. public let reasonCode: ReasonCode? 
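With the tags parameter added above, StartEngagementByAcceptingInvitationTask can tag the task at creation instead of requiring a follow-up TagResource call. A minimal sketch under the same assumptions as the earlier examples; the invitation identifier and tag values are hypothetical, and ClientToken is omitted because the initializer defaults it to a fresh idempotency token:

    // Accept an invitation and tag the resulting task. Per the validation in
    // this change: at most 200 tags, keys up to 128 characters, values up to 256.
    func acceptInvitation(_ selling: PartnerCentralSelling) async throws {
        let response = try await selling.startEngagementByAcceptingInvitationTask(
            catalog: "AWS",
            identifier: "engi-0123456789abc", // invitation ARN or ID
            tags: [PartnerCentralSelling.Tag(key: "team", value: "emea-partners")]
        )
        print(response.resourceSnapshotJobId ?? "-", response.reasonCode?.rawValue ?? "no failure")
    }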
- /// The identifier of the resource snapshot job created as part of this task. + /// The identifier of the Resource Snapshot Job created as part of this task. public let resourceSnapshotJobId: String? /// The timestamp indicating when the task was initiated. The format follows RFC 3339 section 5.6. @OptionalCustomCoding @@ -4422,19 +4466,27 @@ extension PartnerCentralSelling { public let clientToken: String /// The unique identifier of the opportunity from which the engagement task is to be initiated. This helps ensure that the task is applied to the correct opportunity. public let identifier: String + /// A list of objects specifying each tag name and value. + public let tags: [Tag]? @inlinable - public init(awsSubmission: AwsSubmission, catalog: String, clientToken: String = StartEngagementFromOpportunityTaskRequest.idempotencyToken(), identifier: String) { + public init(awsSubmission: AwsSubmission, catalog: String, clientToken: String = StartEngagementFromOpportunityTaskRequest.idempotencyToken(), identifier: String, tags: [Tag]? = nil) { self.awsSubmission = awsSubmission self.catalog = catalog self.clientToken = clientToken self.identifier = identifier + self.tags = tags } public func validate(name: String) throws { try self.validate(self.catalog, name: "catalog", parent: name, pattern: "^[a-zA-Z]+$") try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[!-~]{1,64}$") try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^O[0-9]{1,19}$") + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.tags, name: "tags", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { @@ -4442,13 +4494,14 @@ extension PartnerCentralSelling { case catalog = "Catalog" case clientToken = "ClientToken" case identifier = "Identifier" + case tags = "Tags" } } public struct StartEngagementFromOpportunityTaskResponse: AWSDecodableShape { - /// The identifier of the newly created engagement. Only populated if TaskStatus is COMPLETE. + /// The identifier of the newly created Engagement. Only populated if TaskStatus is COMPLETE. public let engagementId: String? - /// The identifier of the new engagement invitation. Only populated if TaskStatus is COMPLETE. + /// The identifier of the new Engagement invitation. Only populated if TaskStatus is COMPLETE. public let engagementInvitationId: String? /// If the task fails, this field contains a detailed message describing the failure and possible recovery steps. public let message: String? @@ -4456,7 +4509,7 @@ extension PartnerCentralSelling { public let opportunityId: String? /// Indicates the reason for task failure using an enumerated code. public let reasonCode: ReasonCode? - /// The identifier of the resource snapshot job created to add the opportunity resource snapshot to the Engagement. Only populated if TaskStatus is COMPLETE. + /// The identifier of the resource snapshot job created to add the opportunity resource snapshot to the Engagement. Only populated if TaskStatus is COMPLETE public let resourceSnapshotJobId: String? /// The timestamp indicating when the task was initiated. The format follows RFC 3339 section 5.6. @OptionalCustomCoding @@ -4497,9 +4550,9 @@ extension PartnerCentralSelling { } public struct StartResourceSnapshotJobRequest: AWSEncodableShape { - /// Specifies the catalog related to the request. + /// Specifies the catalog related to the request. 
Valid values are: AWS: Starts the request from the production AWS environment. Sandbox: Starts the request from a sandbox environment used for testing or development purposes. public let catalog: String - /// The identifier of the resource snapshot job to start. + /// The identifier of the resource snapshot job to start. public let resourceSnapshotJobIdentifier: String @inlinable @@ -4520,9 +4573,9 @@ extension PartnerCentralSelling { } public struct StopResourceSnapshotJobRequest: AWSEncodableShape { - /// Specifies the catalog related to the request. + /// Specifies the catalog related to the request. Valid values are: AWS: Stops the request from the production AWS environment. Sandbox: Stops the request from a sandbox environment used for testing or development purposes. public let catalog: String - /// The identifier of the job to stop. + /// The identifier of the job to stop. public let resourceSnapshotJobIdentifier: String @inlinable @@ -4543,13 +4596,13 @@ extension PartnerCentralSelling { } public struct SubmitOpportunityRequest: AWSEncodableShape { - /// Specifies the catalog related to the request. + /// Specifies the catalog related to the request. Valid values are: AWS: Submits the opportunity request from the production AWS environment. Sandbox: Submits the opportunity request from a sandbox environment used for testing or development purposes. public let catalog: String - /// The identifier of the opportunity previously created by partner and needs to be submitted. + /// The identifier of the Opportunity previously created by the partner that needs to be submitted. public let identifier: String - /// Specifies the level of AWS sellers' involvement on the opportunity. + /// Specifies the level of AWS sellers' involvement on the opportunity. Valid values: Co-sell: Indicates the user wants to co-sell with AWS. Share the opportunity with AWS to receive deal assistance and support. For Visibility Only: Indicates that the user does not need support from AWS Sales Rep. Share this opportunity with AWS for visibility only; you will not receive deal assistance and support. public let involvementType: SalesInvolvementType - /// Determines whether to restrict visibility of the opportunity from AWS sales. Default value is Full. + /// Determines whether to restrict visibility of the opportunity from AWS sales. Default value is Full. Valid values: Full: The opportunity is fully visible to AWS sales. Limited: The opportunity has restricted visibility to AWS sales. public let visibility: Visibility? @inlinable @@ -4573,6 +4626,100 @@ extension PartnerCentralSelling { } } + public struct Tag: AWSEncodableShape & AWSDecodableShape { + /// The key in the tag. + public let key: String + /// The value in the tag.
+ public let value: String + + @inlinable + public init(key: String, value: String) { + self.key = key + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.key, name: "key", parent: name, max: 128) + try self.validate(self.key, name: "key", parent: name, min: 1) + try self.validate(self.key, name: "key", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") + try self.validate(self.value, name: "value", parent: name, max: 256) + try self.validate(self.value, name: "value", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") + } + + private enum CodingKeys: String, CodingKey { + case key = "Key" + case value = "Value" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource that you want to tag. + public let resourceArn: String + /// A map of the key-value pairs of the tag or tags to assign to the resource. + public let tags: [Tag] + + @inlinable + public init(resourceArn: String, tags: [Tag]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1000) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[\\w+=/,.@-]+:partnercentral:[\\w+=/,.@-]*:[0-9]{12}:catalog/([a-zA-Z]+)/[\\w+=,.@-]+(/[\\w+=,.@-]+)*$") + try self.tags.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.tags, name: "tags", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + case tags = "Tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource that you want to untag. + public let resourceArn: String + /// The keys of the key-value pairs for the tag or tags you want to remove from the specified resource. + public let tagKeys: [String] + + @inlinable + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1000) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[\\w+=/,.@-]+:partnercentral:[\\w+=/,.@-]*:[0-9]{12}:catalog/([a-zA-Z]+)/[\\w+=,.@-]+(/[\\w+=,.@-]+)*$") + try self.tagKeys.forEach { + try validate($0, name: "tagKeys[]", parent: name, max: 128) + try validate($0, name: "tagKeys[]", parent: name, min: 1) + try validate($0, name: "tagKeys[]", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") + } + try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 50) + try self.validate(self.tagKeys, name: "tagKeys", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + case tagKeys = "TagKeys" + } + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + public struct UpdateOpportunityRequest: AWSEncodableShape { /// Specifies the catalog associated with the request. This field takes a string value from a predefined list: AWS or Sandbox. 
The catalog determines which environment the opportunity is updated in. Use AWS to update real opportunities in the production environment, and Sandbox for testing in secure, isolated environments. When you use the Sandbox catalog, it allows you to simulate and validate your interactions with Amazon Web Services services without affecting live data or operations. public let catalog: String @@ -4593,7 +4740,7 @@ extension PartnerCentralSelling { public let opportunityType: OpportunityType? /// Specifies the opportunity's unique identifier in the partner's CRM system. This value is essential to track and reconcile because it's included in the outbound payload sent back to the partner. public let partnerOpportunityIdentifier: String? - /// Identifies the type of support the partner needs from Amazon Web Services. Valid values: Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks. Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation. Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution. Cosell—Pricing Assistance: Connect with an AWS seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals). Cosell—Technical Consultation: Connection with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution. Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment. Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning). Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs RFx support from Amazon Web Services. Do Not Need Support from AWS Sales Rep: Indicates that a partner doesn't need support from an Amazon Web Services Sales representative. The opportunity is managed solely by the partner. It's possible to request coselling support on these opportunities at any stage during their lifecycle. Also known as, for-visibility-only (FVO) opportunity. + /// Identifies the type of support the partner needs from Amazon Web Services. Valid values: Cosell—Architectural Validation: Confirmation from Amazon Web Services that the partner's proposed solution architecture is aligned with Amazon Web Services best practices and poses minimal architectural risks. Cosell—Business Presentation: Request Amazon Web Services seller's participation in a joint customer presentation. Cosell—Competitive Information: Access to Amazon Web Services competitive resources and support for the partner's proposed solution. Cosell—Pricing Assistance: Connect with an AWS seller for support situations where a partner may be receiving an upfront discount on a service (for example: EDP deals). Cosell—Technical Consultation: Connection with an Amazon Web Services Solutions Architect to address the partner's questions about the proposed solution. Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different cost savings of proposed solutions on Amazon Web Services versus on-premises or a traditional hosting environment. 
Cosell—Deal Support: Request Amazon Web Services seller's support to progress the opportunity (for example: joint customer call, strategic positioning). Cosell—Support for Public Tender/RFx: Opportunity related to the public sector where the partner needs RFx support from Amazon Web Services. public let primaryNeedsFromAws: [PrimaryNeedFromAws]? /// An object that contains project details summary for the Opportunity. public let project: Project? @@ -4661,7 +4808,7 @@ extension PartnerCentralSelling { } public struct EngagementContextPayload: AWSEncodableShape & AWSDecodableShape { - /// Contains detailed information about a customer project when the context type is "CustomerProject". This field is present only when the Type in EngagementContextDetails is set to "CustomerProject". + /// Contains detailed information about a customer project when the context type is "CustomerProject". This field is present only when the Type in EngagementContextDetails is set to "CustomerProject". public let customerProject: CustomerProjectsContext? @inlinable diff --git a/Sources/Soto/Services/QBusiness/QBusiness_api.swift b/Sources/Soto/Services/QBusiness/QBusiness_api.swift index 98829a47b8..b5eb3cef78 100644 --- a/Sources/Soto/Services/QBusiness/QBusiness_api.swift +++ b/Sources/Soto/Services/QBusiness/QBusiness_api.swift @@ -90,6 +90,7 @@ public struct QBusiness: AWSService { "ap-southeast-3": "qbusiness.ap-southeast-3.api.aws", "ap-southeast-4": "qbusiness.ap-southeast-4.api.aws", "ap-southeast-5": "qbusiness.ap-southeast-5.api.aws", + "ap-southeast-7": "qbusiness.ap-southeast-7.api.aws", "ca-central-1": "qbusiness.ca-central-1.api.aws", "ca-west-1": "qbusiness.ca-west-1.api.aws", "cn-north-1": "qbusiness.cn-north-1.api.amazonwebservices.com.cn", @@ -105,6 +106,7 @@ public struct QBusiness: AWSService { "il-central-1": "qbusiness.il-central-1.api.aws", "me-central-1": "qbusiness.me-central-1.api.aws", "me-south-1": "qbusiness.me-south-1.api.aws", + "mx-central-1": "qbusiness.mx-central-1.api.aws", "sa-east-1": "qbusiness.sa-east-1.api.aws", "us-east-1": "qbusiness.us-east-1.api.aws", "us-east-2": "qbusiness.us-east-2.api.aws", @@ -130,6 +132,7 @@ public struct QBusiness: AWSService { "ap-southeast-3": "qbusiness-fips.ap-southeast-3.api.aws", "ap-southeast-4": "qbusiness-fips.ap-southeast-4.api.aws", "ap-southeast-5": "qbusiness-fips.ap-southeast-5.api.aws", + "ap-southeast-7": "qbusiness-fips.ap-southeast-7.api.aws", "ca-central-1": "qbusiness-fips.ca-central-1.api.aws", "ca-west-1": "qbusiness-fips.ca-west-1.api.aws", "cn-north-1": "qbusiness-fips.cn-north-1.api.amazonwebservices.com.cn", @@ -145,6 +148,7 @@ public struct QBusiness: AWSService { "il-central-1": "qbusiness-fips.il-central-1.api.aws", "me-central-1": "qbusiness-fips.me-central-1.api.aws", "me-south-1": "qbusiness-fips.me-south-1.api.aws", + "mx-central-1": "qbusiness-fips.mx-central-1.api.aws", "sa-east-1": "qbusiness-fips.sa-east-1.api.aws", "us-east-1": "qbusiness-fips.us-east-1.api.aws", "us-east-2": "qbusiness-fips.us-east-2.api.aws", diff --git a/Sources/Soto/Services/QConnect/QConnect_shapes.swift b/Sources/Soto/Services/QConnect/QConnect_shapes.swift index 654db50d88..e224a49cf9 100644 --- a/Sources/Soto/Services/QConnect/QConnect_shapes.swift +++ b/Sources/Soto/Services/QConnect/QConnect_shapes.swift @@ -1518,15 +1518,18 @@ extension QConnect { public let associationConfigurations: [AssociationConfiguration]? /// The AI Prompt identifier for the Intent Labeling prompt used by the ANSWER_RECOMMENDATION AI Agent. 
public let intentLabelingGenerationAIPromptId: String? + /// The locale that specifies the language and region settings that determine the response language for QueryAssistant. Changing this locale to anything other than en_US will turn off recommendations triggered by contact transcripts for agent assistance, as this feature is not supported in multiple languages. + public let locale: String? /// The AI Prompt identifier for the Query Reformulation prompt used by the ANSWER_RECOMMENDATION AI Agent. public let queryReformulationAIPromptId: String? @inlinable - public init(answerGenerationAIGuardrailId: String? = nil, answerGenerationAIPromptId: String? = nil, associationConfigurations: [AssociationConfiguration]? = nil, intentLabelingGenerationAIPromptId: String? = nil, queryReformulationAIPromptId: String? = nil) { + public init(answerGenerationAIGuardrailId: String? = nil, answerGenerationAIPromptId: String? = nil, associationConfigurations: [AssociationConfiguration]? = nil, intentLabelingGenerationAIPromptId: String? = nil, locale: String? = nil, queryReformulationAIPromptId: String? = nil) { self.answerGenerationAIGuardrailId = answerGenerationAIGuardrailId self.answerGenerationAIPromptId = answerGenerationAIPromptId self.associationConfigurations = associationConfigurations self.intentLabelingGenerationAIPromptId = intentLabelingGenerationAIPromptId + self.locale = locale self.queryReformulationAIPromptId = queryReformulationAIPromptId } @@ -1537,6 +1540,8 @@ extension QConnect { try self.validate(self.intentLabelingGenerationAIPromptId, name: "intentLabelingGenerationAIPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.locale, name: "locale", parent: name, max: 4096) + try self.validate(self.locale, name: "locale", parent: name, min: 1) try self.validate(self.queryReformulationAIPromptId, name: "queryReformulationAIPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$") } @@ -1545,6 +1550,7 @@ extension QConnect { case answerGenerationAIPromptId = "answerGenerationAIPromptId" case associationConfigurations = "associationConfigurations" case intentLabelingGenerationAIPromptId = "intentLabelingGenerationAIPromptId" + case locale = "locale" case queryReformulationAIPromptId = "queryReformulationAIPromptId" } } @@ -5508,7 +5514,7 @@ extension QConnect { public struct GuardrailPiiEntityConfig: AWSEncodableShape & AWSDecodableShape { /// Configure AI Guardrail's action when the PII entity is detected. public let action: GuardrailSensitiveInformationAction - /// Configure AI Guardrail type when the PII entity is detected. The following PIIs are used to block or mask sensitive information: General ADDRESS A physical address, such as "100 Main Street, Anytown, USA" or "Suite #12, Building 123". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood. AGE An individual's age, including the quantity and unit of time. For example, in the phrase "I am 40 years old," Guarrails recognizes "40 years" as an age. NAME An individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. AI Guardrail doesn't apply this entity type to names that are part of organizations or addresses.
For example, AI Guardrail recognizes the "John Doe Organization" as an organization, and it recognizes "Jane Doe Street" as an address. EMAIL An email address, such as marymajor@email.com. PHONE A phone number. This entity type also includes fax and pager numbers. USERNAME A user name that identifies an account, such as a login name, screen name, nick name, or handle. PASSWORD An alphanumeric string that is used as a password, such as "* very20special#pass*". DRIVER_ID The number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters. LICENSE_PLATE A license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country. VEHICLE_IDENTIFICATION_NUMBER A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the ISO 3779 specification. Each country has specific codes and formats for VINs. Finance REDIT_DEBIT_CARD_CVV A three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code. CREDIT_DEBIT_CARD_EXPIRY The expiration date for a credit or debit card. This number is usually four digits long and is often formatted as month/year or MM/YY. AI Guardrail recognizes expiration dates such as 01/21, 01/2021, and Jan 2021. CREDIT_DEBIT_CARD_NUMBER The number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present. PIN A four-digit personal identification number (PIN) with which you can access your bank account. INTERNATIONAL_BANK_ACCOUNT_NUMBER An International Bank Account Number has specific formats in each country. For more information, see www.iban.com/structure. SWIFT_CODE A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers. SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office. IT IP_ADDRESS An IPv4 address, such as 198.51.100.0. MAC_ADDRESS A media access control (MAC) address is a unique identifier assigned to a network interface controller (NIC). URL A web address, such as www.example.com. AWS_ACCESS_KEY A unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. AWS_SECRET_KEY A unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. USA specific US_BANK_ACCOUNT_NUMBER A US bank account number, which is typically 10 to 12 digits long. US_BANK_ROUTING_NUMBER A US bank account routing number. 
These are typically nine digits long, US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a "9" and contain a "7" or "8" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and forth digits. US_PASSPORT_NUMBER A US passport number. Passport numbers range from six to nine alphanumeric characters. US_SOCIAL_SECURITY_NUMBER A US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents. Canada specific CA_HEALTH_NUMBER A Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits. CA_SOCIAL_INSURANCE_NUMBER A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits. The SIN is formatted as three groups of three digits, such as 123-456-789. A SIN can be validated through a simple check-digit process called the Luhn algorithm . UK Specific UK_NATIONAL_HEALTH_SERVICE_NUMBER A UK National Health Service Number is a 10-17 digit number, such as 485 555 3456. The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum. UK_NATIONAL_INSURANCE_NUMBER A UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system. The number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, forth, and sixth digits. UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business. Custom Regex filter - You can use a regular expressions to define patterns for an AI Guardrail to recognize and act upon such as serial number, booking ID etc.. + /// Configure AI Guardrail type when the PII entity is detected. The following PIIs are used to block or mask sensitive information: General ADDRESS A physical address, such as "100 Main Street, Anytown, USA" or "Suite #12, Building 123". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood. AGE An individual's age, including the quantity and unit of time. For example, in the phrase "I am 40 years old," Guardrails recognizes "40 years" as an age. NAME An individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. AI Guardrail doesn't apply this entity type to names that are part of organizations or addresses. For example, AI Guardrail recognizes the "John Doe Organization" as an organization, and it recognizes "Jane Doe Street" as an address. EMAIL An email address, such as marymajor@email.com. PHONE A phone number. This entity type also includes fax and pager numbers. USERNAME A user name that identifies an account, such as a login name, screen name, nick name, or handle. PASSWORD An alphanumeric string that is used as a password, such as "* very20special#pass*". DRIVER_ID The number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters.
LICENSE_PLATE A license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country. VEHICLE_IDENTIFICATION_NUMBER A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the ISO 3779 specification. Each country has specific codes and formats for VINs. Finance CREDIT_DEBIT_CARD_CVV A three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code. CREDIT_DEBIT_CARD_EXPIRY The expiration date for a credit or debit card. This number is usually four digits long and is often formatted as month/year or MM/YY. AI Guardrail recognizes expiration dates such as 01/21, 01/2021, and Jan 2021. CREDIT_DEBIT_CARD_NUMBER The number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present. PIN A four-digit personal identification number (PIN) with which you can access your bank account. INTERNATIONAL_BANK_ACCOUNT_NUMBER An International Bank Account Number has specific formats in each country. For more information, see www.iban.com/structure. SWIFT_CODE A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers. SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office. IT IP_ADDRESS An IPv4 address, such as 198.51.100.0. MAC_ADDRESS A media access control (MAC) address is a unique identifier assigned to a network interface controller (NIC). URL A web address, such as www.example.com. AWS_ACCESS_KEY A unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. AWS_SECRET_KEY A unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. USA specific US_BANK_ACCOUNT_NUMBER A US bank account number, which is typically 10 to 12 digits long. US_BANK_ROUTING_NUMBER A US bank account routing number. These are typically nine digits long. US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a "9" and contains a "7" or "8" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and fourth digits. US_PASSPORT_NUMBER A US passport number. Passport numbers range from six to nine alphanumeric characters. US_SOCIAL_SECURITY_NUMBER A US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents. Canada specific CA_HEALTH_NUMBER A Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits. CA_SOCIAL_INSURANCE_NUMBER A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits.
The SIN is formatted as three groups of three digits, such as 123-456-789. A SIN can be validated through a simple check-digit process called the Luhn algorithm. UK Specific UK_NATIONAL_HEALTH_SERVICE_NUMBER A UK National Health Service Number is a 10-17 digit number, such as 485 555 3456. The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum. UK_NATIONAL_INSURANCE_NUMBER A UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system. The number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, fourth, and sixth digits. UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business. Custom Regex filter - You can use regular expressions to define patterns for an AI Guardrail to recognize and act upon, such as serial number, booking ID, etc. public let type: GuardrailPiiEntityType @inlinable @@ -6902,12 +6908,15 @@ extension QConnect { public let answerGenerationAIPromptId: String? /// The association configurations for overriding behavior on this AI Agent. public let associationConfigurations: [AssociationConfiguration]? + /// The locale that specifies the language and region settings that determine the response language for QueryAssistant. + public let locale: String? @inlinable - public init(answerGenerationAIGuardrailId: String? = nil, answerGenerationAIPromptId: String? = nil, associationConfigurations: [AssociationConfiguration]? = nil) { + public init(answerGenerationAIGuardrailId: String? = nil, answerGenerationAIPromptId: String? = nil, associationConfigurations: [AssociationConfiguration]? = nil, locale: String? = nil) { self.answerGenerationAIGuardrailId = answerGenerationAIGuardrailId self.answerGenerationAIPromptId = answerGenerationAIPromptId self.associationConfigurations = associationConfigurations + self.locale = locale } public func validate(name: String) throws { @@ -6916,12 +6925,15 @@ extension QConnect { try self.associationConfigurations?.forEach { try $0.validate(name: "\(name).associationConfigurations[]") } + try self.validate(self.locale, name: "locale", parent: name, max: 4096) + try self.validate(self.locale, name: "locale", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { case answerGenerationAIGuardrailId = "answerGenerationAIGuardrailId" case answerGenerationAIPromptId = "answerGenerationAIPromptId" case associationConfigurations = "associationConfigurations" + case locale = "locale" } } diff --git a/Sources/Soto/Services/QuickSight/QuickSight_api.swift b/Sources/Soto/Services/QuickSight/QuickSight_api.swift index 77736a6292..f690608b3e 100644 --- a/Sources/Soto/Services/QuickSight/QuickSight_api.swift +++ b/Sources/Soto/Services/QuickSight/QuickSight_api.swift @@ -538,6 +538,7 @@ public struct QuickSight: AWSService { /// - importMode: Indicates whether you want to import the data into SPICE. /// - logicalTableMap: Configures the combination and transformation of the data from the physical tables. /// - name: The display name for the dataset. + /// - performanceConfiguration: The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.
diff --git a/Sources/Soto/Services/QuickSight/QuickSight_api.swift b/Sources/Soto/Services/QuickSight/QuickSight_api.swift
index 77736a6292..f690608b3e 100644
--- a/Sources/Soto/Services/QuickSight/QuickSight_api.swift
+++ b/Sources/Soto/Services/QuickSight/QuickSight_api.swift
@@ -538,6 +538,7 @@
     /// - importMode: Indicates whether you want to import the data into SPICE.
     /// - logicalTableMap: Configures the combination and transformation of the data from the physical tables.
     /// - name: The display name for the dataset.
+    /// - performanceConfiguration: The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.
     /// - permissions: A list of resource permissions on the dataset.
     /// - physicalTableMap: Declares the physical tables that are available in the underlying data sources.
     /// - rowLevelPermissionDataSet: The row-level security configuration for the data that you want to create.
@@ -557,6 +558,7 @@
         importMode: DataSetImportMode,
         logicalTableMap: [String: LogicalTable]? = nil,
         name: String,
+        performanceConfiguration: PerformanceConfiguration? = nil,
         permissions: [ResourcePermission]? = nil,
         physicalTableMap: [String: PhysicalTable],
         rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil,
@@ -576,6 +578,7 @@
             importMode: importMode,
             logicalTableMap: logicalTableMap,
             name: name,
+            performanceConfiguration: performanceConfiguration,
             permissions: permissions,
             physicalTableMap: physicalTableMap,
             rowLevelPermissionDataSet: rowLevelPermissionDataSet,
@@ -6872,6 +6875,7 @@
     /// - importMode: Indicates whether you want to import the data into SPICE.
     /// - logicalTableMap: Configures the combination and transformation of the data from the physical tables.
     /// - name: The display name for the dataset.
+    /// - performanceConfiguration: The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.
     /// - physicalTableMap: Declares the physical tables that are available in the underlying data sources.
     /// - rowLevelPermissionDataSet: The row-level security configuration for the data you want to create.
     /// - rowLevelPermissionTagConfiguration: The configuration of tags on a dataset to set row-level security. Row-level security tags are currently supported for anonymous embedding only.
@@ -6888,6 +6892,7 @@
         importMode: DataSetImportMode,
         logicalTableMap: [String: LogicalTable]? = nil,
         name: String,
+        performanceConfiguration: PerformanceConfiguration? = nil,
         physicalTableMap: [String: PhysicalTable],
         rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil,
         rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil,
@@ -6904,6 +6909,7 @@
            importMode: importMode,
            logicalTableMap: logicalTableMap,
            name: name,
+           performanceConfiguration: performanceConfiguration,
            physicalTableMap: physicalTableMap,
            rowLevelPermissionDataSet: rowLevelPermissionDataSet,
            rowLevelPermissionTagConfiguration: rowLevelPermissionTagConfiguration
diff --git a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift
index 5887e17f88..6759520a28 100644
--- a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift
+++ b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift
@@ -647,6 +647,12 @@ extension QuickSight {
         public var description: String { return self.rawValue }
     }

+    public enum DigitGroupingStyle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+        case `default` = "DEFAULT"
+        case lakhs = "LAKHS"
+        public var description: String { return self.rawValue }
+    }
+
     public enum DisplayFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
         case auto = "AUTO"
         case currency = "CURRENCY"
@@ -1182,6 +1188,8 @@ extension QuickSight {
     public enum NumberScale: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
         case auto = "AUTO"
         case billions = "BILLIONS"
+        case crores = "CRORES"
+        case lakhs = "LAKHS"
         case millions = "MILLIONS"
         case none = "NONE"
         case thousands = "THOUSANDS"
@@ -8636,6 +8644,8 @@ extension QuickSight {
         public let logicalTableMap: [String: LogicalTable]?
         /// The display name for the dataset.
         public let name: String
+        /// The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.
+        public let performanceConfiguration: PerformanceConfiguration?
         /// A list of resource permissions on the dataset.
         public let permissions: [ResourcePermission]?
         /// Declares the physical tables that are available in the underlying data sources.
@@ -8648,7 +8658,7 @@
         public let tags: [Tag]?

         @inlinable
-        public init(awsAccountId: String, columnGroups: [ColumnGroup]? = nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, dataSetId: String, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, folderArns: [String]? = nil, importMode: DataSetImportMode, logicalTableMap: [String: LogicalTable]? = nil, name: String, permissions: [ResourcePermission]? = nil, physicalTableMap: [String: PhysicalTable], rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil, tags: [Tag]? = nil) {
+        public init(awsAccountId: String, columnGroups: [ColumnGroup]? = nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, dataSetId: String, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, folderArns: [String]? = nil, importMode: DataSetImportMode, logicalTableMap: [String: LogicalTable]? = nil, name: String, performanceConfiguration: PerformanceConfiguration? = nil, permissions: [ResourcePermission]? = nil, physicalTableMap: [String: PhysicalTable], rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil, tags: [Tag]? = nil) {
             self.awsAccountId = awsAccountId
             self.columnGroups = columnGroups
             self.columnLevelPermissionRules = columnLevelPermissionRules
@@ -8660,6 +8670,7 @@ extension QuickSight {
             self.importMode = importMode
             self.logicalTableMap = logicalTableMap
             self.name = name
+            self.performanceConfiguration = performanceConfiguration
             self.permissions = permissions
             self.physicalTableMap = physicalTableMap
             self.rowLevelPermissionDataSet = rowLevelPermissionDataSet
@@ -8681,6 +8692,7 @@ extension QuickSight {
             try container.encode(self.importMode, forKey: .importMode)
             try container.encodeIfPresent(self.logicalTableMap, forKey: .logicalTableMap)
             try container.encode(self.name, forKey: .name)
+            try container.encodeIfPresent(self.performanceConfiguration, forKey: .performanceConfiguration)
             try container.encodeIfPresent(self.permissions, forKey: .permissions)
             try container.encode(self.physicalTableMap, forKey: .physicalTableMap)
             try container.encodeIfPresent(self.rowLevelPermissionDataSet, forKey: .rowLevelPermissionDataSet)
@@ -8722,6 +8734,7 @@ extension QuickSight {
             try self.validate(self.logicalTableMap, name: "logicalTableMap", parent: name, min: 1)
             try self.validate(self.name, name: "name", parent: name, max: 128)
             try self.validate(self.name, name: "name", parent: name, min: 1)
+            try self.performanceConfiguration?.validate(name: "\(name).performanceConfiguration")
             try self.permissions?.forEach {
                 try $0.validate(name: "\(name).permissions[]")
             }
@@ -8754,6 +8767,7 @@ extension QuickSight {
             case importMode = "ImportMode"
             case logicalTableMap = "LogicalTableMap"
             case name = "Name"
+            case performanceConfiguration = "PerformanceConfiguration"
             case permissions = "Permissions"
             case physicalTableMap = "PhysicalTableMap"
             case rowLevelPermissionDataSet = "RowLevelPermissionDataSet"
@@ -11878,6 +11892,8 @@ extension QuickSight {
         public let name: String?
         /// The list of columns after all transforms. These columns are available in templates, analyses, and dashboards.
         public let outputColumns: [OutputColumn]?
+        /// The performance optimization configuration of a dataset.
+        public let performanceConfiguration: PerformanceConfiguration?
         /// Declares the physical tables that are available in the underlying data sources.
         public let physicalTableMap: [String: PhysicalTable]?
         /// The row-level security configuration for the dataset.
@@ -11886,7 +11902,7 @@
         public let rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration?

         @inlinable
-        public init(arn: String? = nil, columnGroups: [ColumnGroup]? = nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, consumedSpiceCapacityInBytes: Int64? = nil, createdTime: Date? = nil, dataSetId: String? = nil, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, importMode: DataSetImportMode? = nil, lastUpdatedTime: Date? = nil, logicalTableMap: [String: LogicalTable]? = nil, name: String? = nil, outputColumns: [OutputColumn]? = nil, physicalTableMap: [String: PhysicalTable]? = nil, rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil) {
+        public init(arn: String? = nil, columnGroups: [ColumnGroup]? = nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, consumedSpiceCapacityInBytes: Int64? = nil, createdTime: Date? = nil, dataSetId: String? = nil, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, importMode: DataSetImportMode? = nil, lastUpdatedTime: Date? = nil, logicalTableMap: [String: LogicalTable]? = nil, name: String? = nil, outputColumns: [OutputColumn]? = nil, performanceConfiguration: PerformanceConfiguration? = nil, physicalTableMap: [String: PhysicalTable]? = nil, rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil) {
             self.arn = arn
             self.columnGroups = columnGroups
             self.columnLevelPermissionRules = columnLevelPermissionRules
@@ -11901,6 +11917,7 @@
             self.logicalTableMap = logicalTableMap
             self.name = name
             self.outputColumns = outputColumns
+            self.performanceConfiguration = performanceConfiguration
             self.physicalTableMap = physicalTableMap
             self.rowLevelPermissionDataSet = rowLevelPermissionDataSet
             self.rowLevelPermissionTagConfiguration = rowLevelPermissionTagConfiguration
@@ -11921,6 +11938,7 @@
             case logicalTableMap = "LogicalTableMap"
             case name = "Name"
             case outputColumns = "OutputColumns"
+            case performanceConfiguration = "PerformanceConfiguration"
             case physicalTableMap = "PhysicalTableMap"
             case rowLevelPermissionDataSet = "RowLevelPermissionDataSet"
             case rowLevelPermissionTagConfiguration = "RowLevelPermissionTagConfiguration"
@@ -29876,6 +29894,28 @@ extension QuickSight {
         }
     }

+    public struct PerformanceConfiguration: AWSEncodableShape & AWSDecodableShape {
+        /// A UniqueKey configuration.
+        public let uniqueKeys: [UniqueKey]?
+
+        @inlinable
+        public init(uniqueKeys: [UniqueKey]? = nil) {
+            self.uniqueKeys = uniqueKeys
+        }
+
+        public func validate(name: String) throws {
+            try self.uniqueKeys?.forEach {
+                try $0.validate(name: "\(name).uniqueKeys[]")
+            }
+            try self.validate(self.uniqueKeys, name: "uniqueKeys", parent: name, max: 1)
+            try self.validate(self.uniqueKeys, name: "uniqueKeys", parent: name, min: 1)
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case uniqueKeys = "UniqueKeys"
+        }
+    }
+
     public struct PeriodOverPeriodComputation: AWSEncodableShape & AWSDecodableShape {
         /// The ID for a computation.
         public let computationId: String
@@ -36676,7 +36716,7 @@ extension QuickSight {
             try self.selectedFieldOptions?.forEach {
                 try $0.validate(name: "\(name).selectedFieldOptions[]")
             }
-            try self.validate(self.selectedFieldOptions, name: "selectedFieldOptions", parent: name, max: 100)
+            try self.validate(self.selectedFieldOptions, name: "selectedFieldOptions", parent: name, max: 201)
         }

         private enum CodingKeys: String, CodingKey {
@@ -36940,7 +36980,7 @@ extension QuickSight {
             try self.values?.forEach {
                 try $0.validate(name: "\(name).values[]")
             }
-            try self.validate(self.values, name: "values", parent: name, max: 200)
+            try self.validate(self.values, name: "values", parent: name, max: 201)
         }

         private enum CodingKeys: String, CodingKey {
@@ -37781,18 +37821,22 @@ extension QuickSight {
     }

     public struct ThousandSeparatorOptions: AWSEncodableShape & AWSDecodableShape {
+        /// Determines the way numbers are styled to accommodate different readability standards. The DEFAULT value uses the standard international grouping system and groups numbers by the thousands. The LAKHS value uses the Indian numbering system and groups numbers by lakhs and crores.
+        public let groupingStyle: DigitGroupingStyle?
         /// Determines the thousands separator symbol.
         public let symbol: NumericSeparatorSymbol?
         /// Determines the visibility of the thousands separator.
         public let visibility: Visibility?

         @inlinable
-        public init(symbol: NumericSeparatorSymbol? = nil, visibility: Visibility? = nil) {
+        public init(groupingStyle: DigitGroupingStyle? = nil, symbol: NumericSeparatorSymbol? = nil, visibility: Visibility? = nil) {
+            self.groupingStyle = groupingStyle
             self.symbol = symbol
             self.visibility = visibility
         }

         private enum CodingKeys: String, CodingKey {
+            case groupingStyle = "GroupingStyle"
             case symbol = "Symbol"
             case visibility = "Visibility"
         }
@@ -39909,6 +39953,29 @@ extension QuickSight {
         }
     }

+    public struct UniqueKey: AWSEncodableShape & AWSDecodableShape {
+        /// The names of the columns that are referenced in the UniqueKey configuration.
+        public let columnNames: [String]
+
+        @inlinable
+        public init(columnNames: [String]) {
+            self.columnNames = columnNames
+        }
+
+        public func validate(name: String) throws {
+            try self.columnNames.forEach {
+                try validate($0, name: "columnNames[]", parent: name, max: 128)
+                try validate($0, name: "columnNames[]", parent: name, min: 1)
+            }
+            try self.validate(self.columnNames, name: "columnNames", parent: name, max: 1)
+            try self.validate(self.columnNames, name: "columnNames", parent: name, min: 1)
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case columnNames = "ColumnNames"
+        }
+    }
+
     public struct UniqueValuesComputation: AWSEncodableShape & AWSDecodableShape {
         /// The category field that is used in a computation.
         public let category: DimensionField?
@@ -41173,6 +41240,8 @@ extension QuickSight {
         public let logicalTableMap: [String: LogicalTable]?
         /// The display name for the dataset.
         public let name: String
+        /// The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.
+        public let performanceConfiguration: PerformanceConfiguration?
         /// Declares the physical tables that are available in the underlying data sources.
         public let physicalTableMap: [String: PhysicalTable]
         /// The row-level security configuration for the data you want to create.
@@ -41181,7 +41250,7 @@
         public let rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration?

         @inlinable
-        public init(awsAccountId: String, columnGroups: [ColumnGroup]? = nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, dataSetId: String, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, importMode: DataSetImportMode, logicalTableMap: [String: LogicalTable]? = nil, name: String, physicalTableMap: [String: PhysicalTable], rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil) {
+        public init(awsAccountId: String, columnGroups: [ColumnGroup]? = nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, dataSetId: String, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, importMode: DataSetImportMode, logicalTableMap: [String: LogicalTable]? = nil, name: String, performanceConfiguration: PerformanceConfiguration? = nil, physicalTableMap: [String: PhysicalTable], rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil) {
             self.awsAccountId = awsAccountId
             self.columnGroups = columnGroups
             self.columnLevelPermissionRules = columnLevelPermissionRules
@@ -41192,6 +41261,7 @@ extension QuickSight {
             self.importMode = importMode
             self.logicalTableMap = logicalTableMap
             self.name = name
+            self.performanceConfiguration = performanceConfiguration
             self.physicalTableMap = physicalTableMap
             self.rowLevelPermissionDataSet = rowLevelPermissionDataSet
             self.rowLevelPermissionTagConfiguration = rowLevelPermissionTagConfiguration
@@ -41210,6 +41280,7 @@ extension QuickSight {
             try container.encode(self.importMode, forKey: .importMode)
             try container.encodeIfPresent(self.logicalTableMap, forKey: .logicalTableMap)
             try container.encode(self.name, forKey: .name)
+            try container.encodeIfPresent(self.performanceConfiguration, forKey: .performanceConfiguration)
             try container.encode(self.physicalTableMap, forKey: .physicalTableMap)
             try container.encodeIfPresent(self.rowLevelPermissionDataSet, forKey: .rowLevelPermissionDataSet)
             try container.encodeIfPresent(self.rowLevelPermissionTagConfiguration, forKey: .rowLevelPermissionTagConfiguration)
@@ -41248,6 +41319,7 @@ extension QuickSight {
             try self.validate(self.logicalTableMap, name: "logicalTableMap", parent: name, min: 1)
             try self.validate(self.name, name: "name", parent: name, max: 128)
             try self.validate(self.name, name: "name", parent: name, min: 1)
+            try self.performanceConfiguration?.validate(name: "\(name).performanceConfiguration")
             try self.physicalTableMap.forEach {
                 try validate($0.key, name: "physicalTableMap.key", parent: name, max: 64)
                 try validate($0.key, name: "physicalTableMap.key", parent: name, min: 1)
@@ -41268,6 +41340,7 @@ extension QuickSight {
             case importMode = "ImportMode"
             case logicalTableMap = "LogicalTableMap"
             case name = "Name"
+            case performanceConfiguration = "PerformanceConfiguration"
             case physicalTableMap = "PhysicalTableMap"
             case rowLevelPermissionDataSet = "RowLevelPermissionDataSet"
             case rowLevelPermissionTagConfiguration = "RowLevelPermissionTagConfiguration"
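Before the RDS changes begin, two of the QuickSight additions above are easier to read from the call site. A minimal sketch (the column name and option values are illustrative; it assumes the pre-existing NumericSeparatorSymbol and Visibility enums expose .comma and .visible, which are not shown in this diff):

import SotoQuickSight

// A single UniqueKey over one column. Note the validate() rules above cap
// both the UniqueKeys array and its ColumnNames at exactly one entry.
let performance = QuickSight.PerformanceConfiguration(
    uniqueKeys: [QuickSight.UniqueKey(columnNames: ["order_id"])]
)

// LAKHS grouping renders 12345678 as 1,23,45,678 rather than 12,345,678.
let separator = QuickSight.ThousandSeparatorOptions(
    groupingStyle: .lakhs,
    symbol: .comma,
    visibility: .visible
)

Both values are plain encodable shapes, so they can be passed to createDataSet/updateDataSet or embedded in a visual's format configuration respectively.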
diff --git a/Sources/Soto/Services/RDS/RDS_api.swift b/Sources/Soto/Services/RDS/RDS_api.swift
index b8b9a73abc..471ee12326 100644
--- a/Sources/Soto/Services/RDS/RDS_api.swift
+++ b/Sources/Soto/Services/RDS/RDS_api.swift
@@ -723,7 +723,7 @@ public struct RDS: AWSService {
     ///
     /// Parameters:
     /// - allocatedStorage: The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters only This setting is required to create a Multi-AZ DB cluster.
-    /// - autoMinorVersionUpgrade: Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Multi-AZ DB clusters only
+    /// - autoMinorVersionUpgrade: Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
     /// - availabilityZones: A list of Availability Zones (AZs) where you specifically want to create DB instances in the DB cluster. For information on AZs, see Availability Zones in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only Constraints: Can't specify more than three AZs.
     /// - backtrackWindow: The target backtrack window, in seconds. To disable backtracking, set this value to 0. Valid for Cluster Type: Aurora MySQL DB clusters only Default: 0 Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 hours).
     /// - backupRetentionPeriod: The number of days for which automated backups are retained. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Default: 1 Constraints: Must be a value from 1 to 35.
@@ -731,7 +731,7 @@
     /// - characterSetName: The name of the character set (CharacterSet) to associate the DB cluster with. Valid for Cluster Type: Aurora DB clusters only
     /// - clusterScalabilityType: Specifies the scalability mode of the Aurora DB cluster. When set to limitless, the cluster operates as an Aurora Limitless Database. When set to standard (the default), the cluster uses normal DB instance creation. Valid for: Aurora DB clusters only You can't modify this setting after you create the DB cluster.
     /// - copyTagsToSnapshot: Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
-    /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the cluster.
+    /// - databaseInsightsMode: The mode of Database Insights to enable for the DB cluster. If you set this value to advanced, you must also set the PerformanceInsightsEnabled parameter to true and the PerformanceInsightsRetentionPeriod parameter to 465. Valid for Cluster Type: Aurora DB clusters only
     /// - databaseName: The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional database with this name is created. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
     /// - dbClusterIdentifier: The identifier for this DB cluster. This parameter is stored as a lowercase string. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 1 to 63 (for Aurora DB clusters) or 1 to 52 (for Multi-AZ DB clusters) letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: my-cluster1
     /// - dbClusterInstanceClass: The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see DB instance class in the Amazon RDS User Guide. This setting is required to create a Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters only
@@ -743,11 +743,11 @@
     /// - domainIAMRoleName: The name of the IAM role to use when making API calls to the Directory Service. Valid for Cluster Type: Aurora DB clusters only
     /// - enableCloudwatchLogsExports: The list of log types that need to be enabled for exporting to CloudWatch Logs. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The following values are valid for each DB engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - postgresql | upgrade For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
     /// - enableGlobalWriteForwarding: Specifies whether to enable this DB cluster to forward write operations to the primary cluster of a global cluster (Aurora global database). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database. You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster, and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by a global cluster API operation, but it does nothing until then. Valid for Cluster Type: Aurora DB clusters only
-    /// - enableHttpEndpoint: Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled. When enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor. RDS Data API is supported with the following DB clusters: Aurora PostgreSQL Serverless v2 and provisioned Aurora PostgreSQL and Aurora MySQL Serverless v1 For more information, see Using RDS Data API in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only
+    /// - enableHttpEndpoint: Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled. When enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor. For more information, see Using RDS Data API in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only
     /// - enableIAMDatabaseAuthentication: Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
     /// - enableLimitlessDatabase: Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only This setting is no longer used. Instead use the ClusterScalabilityType setting.
     /// - enableLocalWriteForwarding: Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances. Valid for: Aurora DB clusters only
-    /// - enablePerformanceInsights: Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters only
+    /// - enablePerformanceInsights: Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
     /// - engine: The database engine to use for this DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: aurora-mysql aurora-postgresql mysql postgres neptune - For information about using Amazon Neptune, see the Amazon Neptune User Guide.
     /// - engineLifecycleSupport: The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support
     /// - engineMode: The DB engine mode of the DB cluster, either provisioned or serverless. The serverless engine mode only applies for Aurora Serverless v1 DB clusters. Aurora Serverless v2 DB clusters use the provisioned engine mode. For information about limitations and requirements for Serverless DB clusters, see the following sections in the Amazon Aurora User Guide: Limitations of Aurora Serverless v1 Requirements for Aurora Serverless v2 Valid for Cluster Type: Aurora DB clusters only
@@ -759,12 +759,12 @@
     /// - masterUsername: The name of the master user for the DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must be 1 to 16 letters or numbers. First character must be a letter. Can't be a reserved word for the chosen database engine.
     /// - masterUserPassword: The password for the master database user. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 8 to 41 characters. Can contain any printable ASCII character except "/", """, or "@". Can't be specified if ManageMasterUserPassword is turned on.
     /// - masterUserSecretKmsKeyId: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
-    /// - monitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0
-    /// - monitoringRoleArn: The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid for Cluster Type: Multi-AZ DB clusters only
+    /// - monitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0
+    /// - monitoringRoleArn: The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
     /// - networkType: The network type of the DB cluster. The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only Valid Values: IPV4 | DUAL
     /// - optionGroupName: The option group to associate the DB cluster with. DB clusters are associated with a default option group that can't be modified.
-    /// - performanceInsightsKMSKeyId: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only
-    /// - performanceInsightsRetentionPeriod: The number of days to retain Performance Insights data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.
+    /// - performanceInsightsKMSKeyId: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
+    /// - performanceInsightsRetentionPeriod: The number of days to retain Performance Insights data. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.
     /// - port: The port number on which the instances in the DB cluster accept connections. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 1150-65535 Default: RDS for MySQL and Aurora MySQL - 3306 RDS for PostgreSQL and Aurora PostgreSQL - 5432
     /// - preferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the Amazon Aurora User Guide. Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.
     /// - preferredMaintenanceWindow: The weekly time range during which system maintenance can occur. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide. Constraints: Must be in the format ddd:hh24:mi-ddd:hh24:mi. Days must be one of Mon | Tue | Wed | Thu | Fri | Sat | Sun. Must be in Universal Coordinated Time (UTC). Must be at least 30 minutes.
@@ -1042,7 +1042,7 @@
     /// - characterSetName: For supported engines, the character set (CharacterSet) to associate the DB instance with. This setting doesn't apply to the following DB instances: Amazon Aurora - The character set is managed by the DB cluster. For more information, see CreateDBCluster. RDS Custom - However, if you need to change the character set, you can change it on the database itself.
     /// - copyTagsToSnapshot: Specifies whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.
     /// - customIamInstanceProfile: The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. This setting is required for RDS Custom. Constraints: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide.
-    /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the instance.
+    /// - databaseInsightsMode: The mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. Currently, this value is inherited from the DB cluster and can't be changed.
     /// - dbClusterIdentifier: The identifier of the DB cluster that this DB instance will belong to. This setting doesn't apply to RDS Custom DB instances.
     /// - dbInstanceClass: The compute and memory capacity of the DB instance, for example db.m5.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide.
     /// - dbInstanceIdentifier: The identifier for this DB instance. This parameter is stored as a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: mydbinstance
@@ -1251,13 +1251,13 @@
     /// Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running Db2, MariaDB, MySQL, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide. Amazon Aurora doesn't support this operation. To create a DB instance for an Aurora DB cluster, use the CreateDBInstance operation. All read replica DB instances are created with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified. Your source DB instance or cluster must have backup retention enabled.
     ///
     /// Parameters:
-    /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the read replica. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your read replica so that the create operation can succeed. You can also allocate additional storage for future growth.
+    /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the read replica. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your read replica so that the create operation can succeed. You can also allocate additional storage for future growth.
     /// - autoMinorVersionUpgrade: Specifies whether to automatically apply minor engine upgrades to the read replica during the maintenance window. This setting doesn't apply to RDS Custom DB instances. Default: Inherits the value from the source DB instance.
     /// - availabilityZone: The Availability Zone (AZ) where the read replica will be created. Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region. Example: us-east-1d
     /// - caCertificateIdentifier: The CA certificate identifier to use for the read replica's server certificate. This setting doesn't apply to RDS Custom DB instances. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
     /// - copyTagsToSnapshot: Specifies whether to copy all tags from the read replica to snapshots of the read replica. By default, tags aren't copied.
     /// - customIamInstanceProfile: The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. This setting is required for RDS Custom DB instances.
-    /// - databaseInsightsMode: Specifies the mode of Database Insights.
+    /// - databaseInsightsMode: The mode of Database Insights to enable for the read replica. Currently, this setting is not supported.
     /// - dbInstanceClass: The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Default: Inherits the value from the source DB instance.
     /// - dbInstanceIdentifier: The DB instance identifier of the read replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string.
     /// - dbParameterGroupName: The name of the DB parameter group to associate with this read replica DB instance. For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica. For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the default DBParameterGroup. Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens.
@@ -2893,10 +2893,10 @@
     ///
     /// Parameters:
     /// - dbClusterParameterGroupName: The name of a specific DB cluster parameter group to return parameter details for. Constraints: If supplied, must match the name of an existing DBClusterParameterGroup.
-    /// - filters: This parameter isn't currently supported.
+    /// - filters: A filter that specifies one or more DB cluster parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB cluster parameters with these names.
     /// - marker: An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
     /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100.
-    /// - source: A specific source to return parameters for. Valid Values: customer engine service
+    /// - source: A specific source to return parameters for. Valid Values: engine-default | system | user
     /// - logger: Logger use during operation
     @inlinable
     public func describeDBClusterParameters(
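The filter change above is the user-visible part of this hunk: DescribeDBClusterParameters now accepts a parameter-name filter instead of rejecting all filters. A minimal sketch of using it from Soto (the parameter group and parameter names are hypothetical; client construction is abbreviated and may differ slightly by soto-core version):

import SotoCore
import SotoRDS

let client = AWSClient()                 // default credential chain
let rds = RDS(client: client, region: .useast1)

// Fetch only the named parameters instead of paging through the whole group.
let response = try await rds.describeDBClusterParameters(
    dbClusterParameterGroupName: "default.aurora-mysql8.0",
    filters: [RDS.Filter(name: "parameter-name", values: ["max_connections", "innodb_buffer_pool_size"])]
)
for parameter in response.parameters ?? [] {
    print(parameter.parameterName ?? "?", parameter.parameterValue ?? "")
}

The same parameter-name filter applies to DescribeDBParameters and DescribeEngineDefaultParameters, whose hunks follow.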
@@ -3280,7 +3280,7 @@
     ///
     /// Parameters:
     /// - dbParameterGroupName: The name of a specific DB parameter group to return details for. Constraints: If supplied, must match the name of an existing DBParameterGroup.
-    /// - filters: This parameter isn't currently supported.
+    /// - filters: A filter that specifies one or more DB parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB parameters with these names.
     /// - marker: An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
     /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100.
     /// - source: The parameter types to return. Default: All parameter types returned Valid Values: user | system | engine-default
@@ -3807,7 +3807,7 @@
     ///
     /// Parameters:
     /// - dbParameterGroupFamily: The name of the DB parameter group family. Valid Values: aurora-mysql5.7 aurora-mysql8.0 aurora-postgresql10 aurora-postgresql11 aurora-postgresql12 aurora-postgresql13 aurora-postgresql14 custom-oracle-ee-19 custom-oracle-ee-cdb-19 db2-ae db2-se mariadb10.2 mariadb10.3 mariadb10.4 mariadb10.5 mariadb10.6 mysql5.7 mysql8.0 oracle-ee-19 oracle-ee-cdb-19 oracle-ee-cdb-21 oracle-se2-19 oracle-se2-cdb-19 oracle-se2-cdb-21 postgres10 postgres11 postgres12 postgres13 postgres14 sqlserver-ee-11.0 sqlserver-ee-12.0 sqlserver-ee-13.0 sqlserver-ee-14.0 sqlserver-ee-15.0 sqlserver-ex-11.0 sqlserver-ex-12.0 sqlserver-ex-13.0 sqlserver-ex-14.0 sqlserver-ex-15.0 sqlserver-se-11.0 sqlserver-se-12.0 sqlserver-se-13.0 sqlserver-se-14.0 sqlserver-se-15.0 sqlserver-web-11.0 sqlserver-web-12.0 sqlserver-web-13.0 sqlserver-web-14.0 sqlserver-web-15.0
-    /// - filters: This parameter isn't currently supported.
+    /// - filters: A filter that specifies one or more parameters to describe. The only supported filter is parameter-name. The results list only includes information about the parameters with these names.
     /// - marker: An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
     /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100.
     /// - logger: Logger use during operation
@@ -4825,14 +4825,14 @@
     /// - allowEngineModeChange: Specifies whether engine mode changes from serverless to provisioned are allowed. Valid for Cluster Type: Aurora Serverless v1 DB clusters only Constraints: You must allow engine mode changes when specifying a different value for the EngineMode parameter from the DB cluster's current engine mode.
     /// - allowMajorVersionUpgrade: Specifies whether major version upgrades are allowed. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version.
     /// - applyImmediately: Specifies whether the modifications in this request are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is disabled, changes to the DB cluster are applied during the next maintenance window. Most modifications can be applied immediately or during the next scheduled maintenance window. Some modifications, such as turning on deletion protection and changing the master password, are applied immediately—regardless of when you choose to apply them. By default, this parameter is disabled. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
-    /// - autoMinorVersionUpgrade: Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Multi-AZ DB clusters only
+    /// - autoMinorVersionUpgrade: Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
     /// - awsBackupRecoveryPointArn: The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.
     /// - backtrackWindow: The target backtrack window, in seconds. To disable backtracking, set this value to 0. Valid for Cluster Type: Aurora MySQL DB clusters only Default: 0 Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 hours).
     /// - backupRetentionPeriod: The number of days for which automated backups are retained. Specify a minimum value of 1. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Default: 1 Constraints: Must be a value from 1 to 35.
     /// - caCertificateIdentifier: The CA certificate identifier to use for the DB cluster's server certificate. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters
     /// - cloudwatchLogsExportConfiguration: The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The following values are valid for each DB engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - postgresql | upgrade For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
     /// - copyTagsToSnapshot: Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
-    /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the cluster.
+    /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the DB cluster. If you change the value from standard to advanced, you must set the PerformanceInsightsEnabled parameter to true and the PerformanceInsightsRetentionPeriod parameter to 465. If you change the value from advanced to standard, you must set the PerformanceInsightsEnabled parameter to false. Valid for Cluster Type: Aurora DB clusters only
     /// - dbClusterIdentifier: The DB cluster identifier for the cluster being modified. This parameter isn't case-sensitive. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must match the identifier of an existing DB cluster.
     /// - dbClusterInstanceClass: The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters only
     /// - dbClusterParameterGroupName: The name of the DB cluster parameter group to use for the DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
@@ -4857,8 +4857,8 @@
     /// - networkType: The network type of the DB cluster. The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only Valid Values: IPV4 | DUAL
     /// - newDBClusterIdentifier: The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. The first character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: my-cluster2
     /// - optionGroupName: The option group to associate the DB cluster with. DB clusters are associated with a default option group that can't be modified.
-    /// - performanceInsightsKMSKeyId: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only
-    /// - performanceInsightsRetentionPeriod: The number of days to retain Performance Insights data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.
+    /// - performanceInsightsKMSKeyId: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
+    /// - performanceInsightsRetentionPeriod: The number of days to retain Performance Insights data. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.
     /// - port: The port number on which the DB cluster accepts connections. Valid for Cluster Type: Aurora DB clusters only Valid Values: 1150-65535 Default: The same port as the original DB cluster.
     /// - preferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes.
     /// - preferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide. Constraints: Must be in the format ddd:hh24:mi-ddd:hh24:mi. Days must be one of Mon | Tue | Wed | Thu | Fri | Sat | Sun. Must be in Universal Coordinated Time (UTC). Must be at least 30 minutes.
@@ -5102,9 +5102,9 @@
     /// - backupRetentionPeriod: The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible. This setting doesn't apply to Amazon Aurora DB instances. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster. Default: Uses existing setting Constraints: Must be a value from 0 to 35. Can't be set to 0 if the DB instance is a source to read replicas. Can't be set to 0 for an RDS Custom for Oracle DB instance.
     /// - caCertificateIdentifier: The CA certificate identifier to use for the DB instance's server certificate. This setting doesn't apply to RDS Custom DB instances. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
     /// - certificateRotationRestart: Specifies whether the DB instance is restarted when you rotate your SSL/TLS certificate. By default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted. Set this parameter only if you are not using SSL/TLS to connect to the DB instance. If you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate: For more information about rotating your SSL/TLS certificate for RDS DB engines, see Rotating Your SSL/TLS Certificate in the Amazon RDS User Guide. For more information about rotating your SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS Certificate in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances.
-    /// - cloudwatchLogsExportConfiguration: The log types to be enabled for export to CloudWatch Logs for a specific DB instance. A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect. This setting doesn't apply to RDS Custom DB instances.
+    /// - cloudwatchLogsExportConfiguration: The log types to be enabled for export to CloudWatch Logs for a specific DB instance. A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect. This setting doesn't apply to RDS Custom DB instances. The following values are valid for each DB engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - postgresql | upgrade For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
     /// - copyTagsToSnapshot: Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags aren't copied. This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see ModifyDBCluster.
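Because a CloudwatchLogsExportConfiguration change is applied immediately regardless of ApplyImmediately, it can be issued as a standalone modification. A minimal sketch, reusing the rds client from the earlier sketch (the instance identifier and log types are illustrative; valid log names per engine are listed in the doc text above):

// Enable error and slow query log export on an RDS for MySQL instance.
// Applied immediately; no reboot or maintenance window involved.
_ = try await rds.modifyDBInstance(
    cloudwatchLogsExportConfiguration: RDS.CloudwatchLogsExportConfiguration(
        enableLogTypes: ["error", "slowquery"]
    ),
    dbInstanceIdentifier: "mydbinstance"
)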
- /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the instance. + /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. Currently, this value is inherited from the DB cluster and can't be changed. /// - dbInstanceClass: The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide. For RDS Custom, see DB instance class support for RDS Custom for Oracle and DB instance class support for RDS Custom for SQL Server. If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless you specify ApplyImmediately in your request. Default: Uses existing setting Constraints: If you are modifying the DB instance class and upgrading the engine version at the same time, the currently running engine version must be supported on the specified DB instance class. Otherwise, the operation returns an error. In this case, first run the operation to upgrade the engine version, and then run it again to modify the DB instance class. /// - dbInstanceIdentifier: The identifier of DB instance to modify. This value is stored as a lowercase string. Constraints: Must match the identifier of an existing DB instance. /// - dbParameterGroupName: The name of the DB parameter group to apply to the DB instance. Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. In this case, the DB instance isn't rebooted automatically, and the parameter changes aren't applied during the next maintenance window. However, if you modify dynamic parameters in the newly associated DB parameter group, these changes are applied immediately without a reboot. This setting doesn't apply to RDS Custom DB instances. Default: Uses existing setting Constraints: Must be in the same DB parameter group family as the DB instance. @@ -5125,7 +5125,7 @@ public struct RDS: AWSService { /// - enablePerformanceInsights: Specifies whether to enable Performance Insights for the DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom DB instances. /// - engine: The target Oracle DB engine when you convert a non-CDB to a CDB. This intermediate step is necessary to upgrade an Oracle Database 19c non-CDB to an Oracle Database 21c CDB. Note the following requirements: Make sure that you specify oracle-ee-cdb or oracle-se2-cdb. Make sure that your DB engine runs Oracle Database 19c with an April 2021 or later RU. Note the following limitations: You can't convert a CDB to a non-CDB. You can't convert a replica database. You can't convert a non-CDB to a CDB and upgrade the engine version in the same command. You can't convert the existing custom parameter or option group when it has options or parameters that are permanent or persistent. In this situation, the DB instance reverts to the default option and parameter group. 
To avoid reverting to the default, specify a new parameter group with --db-parameter-group-name and a new option group with --option-group-name. /// - engineVersion: The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family. If you specify only a major version, Amazon RDS updates the DB instance to the default minor version if the current minor version is lower. For information about valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions. If the instance that you're modifying is acting as a read replica, the engine version that you specify must be the same or higher than the version that the source DB instance or cluster is running. In RDS Custom for Oracle, this parameter is supported for read replicas only if they are in the PATCH_DB_FAILURE lifecycle. Constraints: If you are upgrading the engine version and modifying the DB instance class at the same time, the currently running engine version must be supported on the specified DB instance class. Otherwise, the operation returns an error. In this case, first run the operation to upgrade the engine version, and then run it again to modify the DB instance class. - /// - iops: The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect. If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance. Constraints: For RDS for MariaDB, RDS for MySQL, RDS for Oracle, and RDS for PostgreSQL - The value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. When you increase the Provisioned IOPS, you must also specify the AllocatedStorage parameter. You can use the current value for AllocatedStorage. Default: Uses existing setting + /// - iops: The new Provisioned IOPS (I/O operations per second) value for the RDS instance. 
Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect. If you choose to migrate your DB instance from using standard storage to Provisioned IOPS (io1), or from Provisioned IOPS to standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance. Constraints: For RDS for MariaDB, RDS for MySQL, RDS for Oracle, and RDS for PostgreSQL - The value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. When you increase the Provisioned IOPS, you must also specify the AllocatedStorage parameter. You can use the current value for AllocatedStorage. Default: Uses existing setting /// - licenseModel: The license model for the DB instance. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license /// - manageMasterUserPassword: Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. If the DB instance doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. In this case, you can't specify MasterUserPassword. If the DB instance already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, Amazon RDS deletes the secret and uses the new password for the master user specified by MasterUserPassword. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. /// - masterUserPassword: The new password for the master user. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response. 
Amazon RDS API operations never return the password, so this operation provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked. This setting doesn't apply to the following DB instances: Amazon Aurora (The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster.) RDS Custom Default: Uses existing setting Constraints: Can't be specified if ManageMasterUserPassword is turned on. Can include any printable ASCII character except "/", """, or "@". For RDS for Oracle, can't include the "&" (ampersand) or the "'" (single quotes) character. Length Constraints: RDS for Db2 - Must contain from 8 to 255 characters. RDS for MariaDB - Must contain from 8 to 41 characters. RDS for Microsoft SQL Server - Must contain from 8 to 128 characters. RDS for MySQL - Must contain from 8 to 41 characters. RDS for Oracle - Must contain from 8 to 30 characters. RDS for PostgreSQL - Must contain from 8 to 128 characters. @@ -5149,7 +5149,7 @@ public struct RDS: AWSService { /// - resumeFullAutomationModeMinutes: The number of minutes to pause the automation. When the time period ends, RDS Custom resumes full automation. Default: 60 Constraints: Must be at least 60. Must be no more than 1,440. /// - rotateMasterUserPassword: Specifies whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password. This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The secret value contains the updated password. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: You must apply the change immediately when rotating the master user password. /// - storageThroughput: The storage throughput value for the DB instance. This setting applies only to the gp3 storage type. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. - /// - storageType: The storage type to associate with the DB instance. If you specify io1, io2, or gp3 you must also include a value for the Iops parameter. If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance. Valid Values: gp2 | gp3 | io1 | io2 | standard Default: io1, if the Iops parameter is specified. Otherwise, gp2. + /// - storageType: The storage type to associate with the DB instance. If you specify io1, io2, or gp3 you must also include a value for the Iops parameter. 
If you choose to migrate your DB instance from using standard storage to gp2 (General Purpose SSD), gp3, or Provisioned IOPS (io1), or from these storage types to standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance. Valid Values: gp2 | gp3 | io1 | io2 | standard Default: io1, if the Iops parameter is specified. Otherwise, gp2. /// - tdeCredentialArn: The ARN from the key store with which to associate the instance for TDE encryption. This setting doesn't apply to RDS Custom DB instances. /// - tdeCredentialPassword: The password for the given ARN from the key store in order to access the device. This setting doesn't apply to RDS Custom DB instances. /// - useDefaultProcessorFeatures: Specifies whether the DB instance class of the DB instance uses its default processor features. This setting doesn't apply to RDS Custom DB instances. @@ -6584,7 +6584,7 @@ public struct RDS: AWSService { /// - enableIAMDatabaseAuthentication: Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters /// - enablePerformanceInsights: Specifies whether to turn on Performance Insights for the DB cluster. /// - engineLifecycleSupport: The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support - /// - engineMode: The engine mode of the new cluster. Specify provisioned or serverless, depending on the type of the cluster you are creating. 
You can create an Aurora Serverless v1 clone from a provisioned cluster, or a provisioned clone from an Aurora Serverless v1 cluster. To create a clone that is an Aurora Serverless v1 cluster, the original cluster must be an Aurora Serverless v1 cluster or an encrypted provisioned cluster. Valid for: Aurora DB clusters only + /// - engineMode: The engine mode of the new cluster. Specify provisioned or serverless, depending on the type of the cluster you are creating. You can create an Aurora Serverless v1 clone from a provisioned cluster, or a provisioned clone from an Aurora Serverless v1 cluster. To create a clone that is an Aurora Serverless v1 cluster, the original cluster must be an Aurora Serverless v1 cluster or an encrypted provisioned cluster. To create a full copy that is an Aurora Serverless v1 cluster, specify the engine mode serverless. Valid for: Aurora DB clusters only /// - iops: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. For information about valid IOPS values, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB instance. Valid for: Multi-AZ DB clusters only /// - kmsKeyId: The Amazon Web Services KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. You can restore to a new DB cluster and encrypt the new DB cluster with a KMS key that is different from the KMS key used to encrypt the source DB cluster. The new DB cluster is encrypted with the KMS key identified by the KmsKeyId parameter. If you don't specify a value for the KmsKeyId parameter, then the following occurs: If the DB cluster is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the source DB cluster. If the DB cluster isn't encrypted, then the restored DB cluster isn't encrypted. If DBClusterIdentifier refers to a DB cluster that isn't encrypted, then the restore request is rejected. Valid for: Aurora DB clusters and Multi-AZ DB clusters /// - monitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0 @@ -6702,7 +6702,7 @@ public struct RDS: AWSService { /// Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment. If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. 
After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original DB instance with the DB instance created from the snapshot. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. For more information about upgrading a RDS for MySQL DB snapshot engine version, see Upgrading a MySQL DB snapshot engine version. For more information about upgrading a RDS for PostgreSQL DB snapshot engine version, Upgrading a PostgreSQL DB snapshot engine version. This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot. /// /// Parameters: - /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. /// - autoMinorVersionUpgrade: Specifies whether to automatically apply minor version upgrades to the DB instance during the maintenance window. If you restore an RDS Custom DB instance, you must disable this parameter. /// - availabilityZone: The Availability Zone (AZ) where the DB instance will be created. Default: A random, system-chosen Availability Zone. Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. Example: us-east-1a /// - backupTarget: Specifies where automated backups and manual snapshots are stored for the restored DB instance. Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services Region). The default is region. For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. @@ -6854,13 +6854,13 @@ public struct RDS: AWSService { /// Amazon Relational Database Service (Amazon RDS) supports importing MySQL databases by using backup files. You can create a backup of your on-premises database, store it on Amazon Simple Storage Service (Amazon S3), and then restore the backup file onto a new Amazon RDS DB instance running MySQL. For more information, see Importing Data into an Amazon RDS MySQL DB Instance in the Amazon RDS User Guide. This operation doesn't apply to RDS Custom. /// /// Parameters: - /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. 
This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. /// - autoMinorVersionUpgrade: Specifies whether to automatically apply minor engine upgrades to the DB instance during the maintenance window. By default, minor engine upgrades are not applied automatically. /// - availabilityZone: The Availability Zone that the DB instance is created in. For information about Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones in the Amazon RDS User Guide. Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region. Example: us-east-1d Constraint: The AvailabilityZone parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web Services Region as the current endpoint. /// - backupRetentionPeriod: The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. For more information, see CreateDBInstance. /// - caCertificateIdentifier: The CA certificate identifier to use for the DB instance's server certificate. This setting doesn't apply to RDS Custom DB instances. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide. /// - copyTagsToSnapshot: Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. - /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the instance. + /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. Currently, this value is inherited from the DB cluster and can't be changed. /// - dbInstanceClass: The compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Importing from Amazon S3 isn't supported on the db.t2.micro DB instance class. /// - dbInstanceIdentifier: The DB instance identifier. This parameter is stored as a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: mydbinstance /// - dbName: The name of the database to create when the DB instance is created. Follow the naming rules specified in CreateDBInstance. @@ -7036,7 +7036,7 @@ public struct RDS: AWSService { /// Restores a DB instance to an arbitrary point in time. You can restore to any point in time before the time identified by the LatestRestorableTime property. You can restore to a point up to the number of days specified by the BackupRetentionPeriod property. The target database is created with most of the original configuration, but in a system-selected Availability Zone, with the default security group, the default subnet group, and the default DB parameter group. 
By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored deployment and not a single-AZ deployment. This operation doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterToPointInTime. /// /// Parameters: - /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. /// - autoMinorVersionUpgrade: Specifies whether minor version upgrades are applied automatically to the DB instance during the maintenance window. This setting doesn't apply to RDS Custom. /// - availabilityZone: The Availability Zone (AZ) where the DB instance will be created. Default: A random, system-chosen Availability Zone. Constraints: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. Example: us-east-1a /// - backupTarget: The location for storing automated backups and manual snapshots for the restored DB instance. Valid Values: outposts (Amazon Web Services Outposts) region (Amazon Web Services Region) Default: region For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. @@ -7909,9 +7909,9 @@ extension RDS { /// /// - Parameters: /// - dbClusterParameterGroupName: The name of a specific DB cluster parameter group to return parameter details for. Constraints: If supplied, must match the name of an existing DBClusterParameterGroup. - /// - filters: This parameter isn't currently supported. + /// - filters: A filter that specifies one or more DB cluster parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB cluster parameters with these names. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. - /// - source: A specific source to return parameters for. Valid Values: customer engine service + /// - source: A specific source to return parameters for. Valid Values: engine-default system user /// - logger: Logger used for logging @inlinable public func describeDBClusterParametersPaginator( @@ -8283,7 +8283,7 @@ extension RDS { /// /// - Parameters: /// - dbParameterGroupName: The name of a specific DB parameter group to return details for. Constraints: If supplied, must match the name of an existing DBParameterGroup. - /// - filters: This parameter isn't currently supported. + /// - filters: A filter that specifies one or more DB parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB parameters with these names. 
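Since filters is no longer documented as unsupported, a short sketch of the parameter-name filter with the paginator documented above may help; the parameter group and parameter names are placeholders, and the client setup mirrors the earlier sketch:

```swift
import SotoRDS

let client = AWSClient()
let rds = RDS(client: client, region: .useast1)

// Page through only the named parameters, using the parameter-name
// filter that the updated documentation describes.
for try await page in rds.describeDBParametersPaginator(
    dbParameterGroupName: "default.mysql8.0",  // placeholder group name
    filters: [RDS.Filter(name: "parameter-name", values: ["max_connections", "wait_timeout"])]
) {
    for parameter in page.parameters ?? [] {
        print(parameter.parameterName ?? "-", parameter.parameterValue ?? "-")
    }
}

try await client.shutdown()
```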
/// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. /// - source: The parameter types to return. Default: All parameter types returned Valid Values: user | system | engine-default /// - logger: Logger used for logging @@ -8725,7 +8725,7 @@ extension RDS { /// /// - Parameters: /// - dbParameterGroupFamily: The name of the DB parameter group family. Valid Values: aurora-mysql5.7 aurora-mysql8.0 aurora-postgresql10 aurora-postgresql11 aurora-postgresql12 aurora-postgresql13 aurora-postgresql14 custom-oracle-ee-19 custom-oracle-ee-cdb-19 db2-ae db2-se mariadb10.2 mariadb10.3 mariadb10.4 mariadb10.5 mariadb10.6 mysql5.7 mysql8.0 oracle-ee-19 oracle-ee-cdb-19 oracle-ee-cdb-21 oracle-se2-19 oracle-se2-cdb-19 oracle-se2-cdb-21 postgres10 postgres11 postgres12 postgres13 postgres14 sqlserver-ee-11.0 sqlserver-ee-12.0 sqlserver-ee-13.0 sqlserver-ee-14.0 sqlserver-ee-15.0 sqlserver-ex-11.0 sqlserver-ex-12.0 sqlserver-ex-13.0 sqlserver-ex-14.0 sqlserver-ex-15.0 sqlserver-se-11.0 sqlserver-se-12.0 sqlserver-se-13.0 sqlserver-se-14.0 sqlserver-se-15.0 sqlserver-web-11.0 sqlserver-web-12.0 sqlserver-web-13.0 sqlserver-web-14.0 sqlserver-web-15.0 - /// - filters: This parameter isn't currently supported. + /// - filters: A filter that specifies one or more parameters to describe. The only supported filter is parameter-name. The results list only includes information about the parameters with these names. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. /// - logger: Logger used for logging @inlinable diff --git a/Sources/Soto/Services/RDS/RDS_shapes.swift b/Sources/Soto/Services/RDS/RDS_shapes.swift index 64b8daa7fa..ff9138429a 100644 --- a/Sources/Soto/Services/RDS/RDS_shapes.swift +++ b/Sources/Soto/Services/RDS/RDS_shapes.swift @@ -722,10 +722,10 @@ extension RDS { } public struct CloudwatchLogsExportConfiguration: AWSEncodableShape { - /// The list of log types to disable. + /// The list of log types to disable. The following values are valid for each DB engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - postgresql | upgrade @OptionalCustomCoding> public var disableLogTypes: [String]? - /// The list of log types to enable. + /// The list of log types to enable. The following values are valid for each DB engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - postgresql | upgrade @OptionalCustomCoding> public var enableLogTypes: [String]? @@ -1333,7 +1333,7 @@ extension RDS { /// The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters only This setting is required to create a Multi-AZ DB cluster. public let allocatedStorage: Int? - /// Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. 
Valid for Cluster Type: Multi-AZ DB clusters only + /// Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let autoMinorVersionUpgrade: Bool? /// A list of Availability Zones (AZs) where you specifically want to create DB instances in the DB cluster. For information on AZs, see Availability Zones in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only Constraints: Can't specify more than three AZs. @OptionalCustomCoding> @@ -1350,7 +1350,7 @@ public let clusterScalabilityType: ClusterScalabilityType? /// Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let copyTagsToSnapshot: Bool? - /// Specifies the mode of Database Insights to enable for the cluster. + /// The mode of Database Insights to enable for the DB cluster. If you set this value to advanced, you must also set the PerformanceInsightsEnabled parameter to true and the PerformanceInsightsRetentionPeriod parameter to 465. Valid for Cluster Type: Aurora DB clusters only public let databaseInsightsMode: DatabaseInsightsMode? /// The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional database with this name is created. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let databaseName: String? @@ -1375,7 +1375,7 @@ public var enableCloudwatchLogsExports: [String]? /// Specifies whether to enable this DB cluster to forward write operations to the primary cluster of a global cluster (Aurora global database). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database. You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster, and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by a global cluster API operation, but it does nothing until then. Valid for Cluster Type: Aurora DB clusters only public let enableGlobalWriteForwarding: Bool? - /// Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled. When enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor. RDS Data API is supported with the following DB clusters: Aurora PostgreSQL Serverless v2 and provisioned Aurora PostgreSQL and Aurora MySQL Serverless v1 For more information, see Using RDS Data API in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only + /// Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled. When enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor.
For more information, see Using RDS Data API in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only public let enableHttpEndpoint: Bool? /// Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let enableIAMDatabaseAuthentication: Bool? @@ -1383,7 +1383,7 @@ extension RDS { public let enableLimitlessDatabase: Bool? /// Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances. Valid for: Aurora DB clusters only public let enableLocalWriteForwarding: Bool? - /// Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters only + /// Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let enablePerformanceInsights: Bool? /// The database engine to use for this DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: aurora-mysql aurora-postgresql mysql postgres neptune - For information about using Amazon Neptune, see the Amazon Neptune User Guide . public let engine: String? @@ -1407,17 +1407,17 @@ extension RDS { public let masterUserPassword: String? /// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let masterUserSecretKmsKeyId: String? - /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0 + /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. 
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0 public let monitoringInterval: Int? - /// The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid for Cluster Type: Multi-AZ DB clusters only + /// The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let monitoringRoleArn: String? /// The network type of the DB cluster. The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only Valid Values: IPV4 | DUAL public let networkType: String? /// The option group to associate the DB cluster with. DB clusters are associated with a default option group that can't be modified. public let optionGroupName: String? - /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only + /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let performanceInsightsKMSKeyId: String? - /// The number of days to retain Performance Insights data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. + /// The number of days to retain Performance Insights data. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 7 month * 31, where month is a number of months from 1-23. 
Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. public let performanceInsightsRetentionPeriod: Int? /// The port number on which the instances in the DB cluster accept connections. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 1150-65535 Default: RDS for MySQL and Aurora MySQL - 3306 RDS for PostgreSQL and Aurora PostgreSQL - 5432 public let port: Int? @@ -1684,7 +1684,7 @@ extension RDS { public let copyTagsToSnapshot: Bool? /// The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. This setting is required for RDS Custom. Constraints: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. public let customIamInstanceProfile: String? - /// Specifies the mode of Database Insights to enable for the instance. + /// The mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. Currently, this value is inherited from the DB cluster and can't be changed. public let databaseInsightsMode: DatabaseInsightsMode? /// The identifier of the DB cluster that this DB instance will belong to. This setting doesn't apply to RDS Custom DB instances. public let dbClusterIdentifier: String? @@ -1942,7 +1942,7 @@ extension RDS { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } public struct _VpcSecurityGroupIdsEncoding: ArrayCoderProperties { public static let member = "VpcSecurityGroupId" } - /// The amount of storage (in gibibytes) to allocate initially for the read replica. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your read replica so that the create operation can succeed. You can also allocate additional storage for future growth. + /// The amount of storage (in gibibytes) to allocate initially for the read replica. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your read replica so that the create operation can succeed. You can also allocate additional storage for future growth. public let allocatedStorage: Int? /// Specifies whether to automatically apply minor engine upgrades to the read replica during the maintenance window. This setting doesn't apply to RDS Custom DB instances. Default: Inherits the value from the source DB instance. public let autoMinorVersionUpgrade: Bool? @@ -1954,7 +1954,7 @@ extension RDS { public let copyTagsToSnapshot: Bool? /// The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. This setting is required for RDS Custom DB instances. public let customIamInstanceProfile: String? 
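The CreateDBClusterMessage hunks above widen Performance Insights, Enhanced Monitoring, and Database Insights from Multi-AZ DB clusters to Aurora clusters as well. A hedged sketch of an Aurora MySQL cluster that opts in, with placeholder identifiers and ARN and a Secrets Manager-managed master password; treat the exact flag combination as illustrative:

```swift
import SotoRDS

let client = AWSClient()
let rds = RDS(client: client, region: .useast1)

// Performance Insights and Enhanced Monitoring are now documented as
// valid for Aurora clusters, not only Multi-AZ DB clusters.
let response = try await rds.createDBCluster(
    dbClusterIdentifier: "my-aurora-cluster",  // placeholder
    enablePerformanceInsights: true,
    engine: "aurora-mysql",
    manageMasterUserPassword: true,  // password managed in Secrets Manager
    masterUsername: "admin",
    monitoringInterval: 60,
    monitoringRoleArn: "arn:aws:iam::123456789012:role/emaccess",  // placeholder ARN
    performanceInsightsRetentionPeriod: 93  // 3 months * 31
)
print(response.dbCluster?.status ?? "unknown")

try await client.shutdown()
```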
- /// Specifies the mode of Database Insights. + /// The mode of Database Insights to enable for the read replica. Currently, this setting is not supported. public let databaseInsightsMode: DatabaseInsightsMode? /// The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Default: Inherits the value from the source DB instance. public let dbInstanceClass: String? @@ -2820,7 +2820,7 @@ public var associatedRoles: [DBClusterRole]? /// The time when a stopped DB cluster is restarted automatically. public let automaticRestartTime: Date? - /// Indicates whether minor version patches are applied automatically. This setting is only for non-Aurora Multi-AZ DB clusters. + /// Indicates whether minor version patches are applied automatically. This setting is for Aurora DB clusters and Multi-AZ DB clusters. public let autoMinorVersionUpgrade: Bool? /// The list of Availability Zones (AZs) where instances in the DB cluster can be created. @OptionalCustomCoding> @@ -2851,7 +2851,7 @@ /// The custom endpoints associated with the DB cluster. @OptionalCustomCoding> public var customEndpoints: [String]? - /// The mode of Database Insights that is enabled for the cluster. + /// The mode of Database Insights that is enabled for the DB cluster. public let databaseInsightsMode: DatabaseInsightsMode? /// The name of the initial database that was specified for the DB cluster when it was created, if one was provided. This same name is returned for the life of the DB cluster. public let databaseName: String? @@ -2923,9 +2923,9 @@ public let masterUsername: String? /// The secret managed by RDS in Amazon Web Services Secrets Manager for the master user password. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide. public let masterUserSecret: MasterUserSecret? - /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. This setting is only for non-Aurora Multi-AZ DB clusters. + /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. This setting is only for Aurora DB clusters and Multi-AZ DB clusters. public let monitoringInterval: Int? - /// The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. This setting is only for non-Aurora Multi-AZ DB clusters. + /// The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. This setting is only for Aurora DB clusters and Multi-AZ DB clusters. public let monitoringRoleArn: String? /// Indicates whether the DB cluster has instances in multiple Availability Zones. public let multiAZ: Bool? @@ -2935,11 +2935,11 @@ public let pendingModifiedValues: ClusterPendingModifiedValues? /// The progress of the operation as a percentage. public let percentProgress: String? - /// Indicates whether Performance Insights is enabled for the DB cluster. This setting is only for non-Aurora Multi-AZ DB clusters. + /// Indicates whether Performance Insights is enabled for the DB cluster.
This setting is only for Aurora DB clusters and Multi-AZ DB clusters. public let performanceInsightsEnabled: Bool? - /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. This setting is only for non-Aurora Multi-AZ DB clusters. + /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. This setting is only for Aurora DB clusters and Multi-AZ DB clusters. public let performanceInsightsKMSKeyId: String? - /// The number of days to retain Performance Insights data. This setting is only for non-Aurora Multi-AZ DB clusters. Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days + /// The number of days to retain Performance Insights data. This setting is only for Aurora DB clusters and Multi-AZ DB clusters. Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days public let performanceInsightsRetentionPeriod: Int? /// The port that the database engine is listening on. public let port: Int? @@ -6356,14 +6356,14 @@ extension RDS { /// The name of a specific DB cluster parameter group to return parameter details for. Constraints: If supplied, must match the name of an existing DBClusterParameterGroup. public let dbClusterParameterGroupName: String? - /// This parameter isn't currently supported. + /// A filter that specifies one or more DB cluster parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB cluster parameters with these names. @OptionalCustomCoding> public var filters: [Filter]? /// An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. public let marker: String? /// The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. public let maxRecords: Int? - /// A specific source to return parameters for. Valid Values: customer engine service + /// A specific source to return parameters for. Valid Values: engine-default system user public let source: String? @inlinable @@ -6730,7 +6730,7 @@ extension RDS { /// The name of a specific DB parameter group to return details for. Constraints: If supplied, must match the name of an existing DBParameterGroup. public let dbParameterGroupName: String? - /// This parameter isn't currently supported. + /// A filter that specifies one or more DB parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB parameters with these names. @OptionalCustomCoding> public var filters: [Filter]? /// An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. 
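The corrected source documentation above lists engine-default, system, and user as the valid values. A small sketch that requests only user-modified cluster parameters (the parameter group name is a placeholder, and the client setup mirrors the earlier sketches):

```swift
import SotoRDS

let client = AWSClient()
let rds = RDS(client: client, region: .useast1)

// Fetch only parameters whose source is "user", one of the corrected
// Valid Values (engine-default, system, user).
let details = try await rds.describeDBClusterParameters(
    dbClusterParameterGroupName: "my-cluster-params",  // placeholder
    source: "user"
)
for parameter in details.parameters ?? [] {
    print(parameter.parameterName ?? "-", parameter.source ?? "-")
}

try await client.shutdown()
```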
@@ -7305,7 +7305,7 @@ extension RDS { /// The name of the DB parameter group family. Valid Values: aurora-mysql5.7 aurora-mysql8.0 aurora-postgresql10 aurora-postgresql11 aurora-postgresql12 aurora-postgresql13 aurora-postgresql14 custom-oracle-ee-19 custom-oracle-ee-cdb-19 db2-ae db2-se mariadb10.2 mariadb10.3 mariadb10.4 mariadb10.5 mariadb10.6 mysql5.7 mysql8.0 oracle-ee-19 oracle-ee-cdb-19 oracle-ee-cdb-21 oracle-se2-19 oracle-se2-cdb-19 oracle-se2-cdb-21 postgres10 postgres11 postgres12 postgres13 postgres14 sqlserver-ee-11.0 sqlserver-ee-12.0 sqlserver-ee-13.0 sqlserver-ee-14.0 sqlserver-ee-15.0 sqlserver-ex-11.0 sqlserver-ex-12.0 sqlserver-ex-13.0 sqlserver-ex-14.0 sqlserver-ex-15.0 sqlserver-se-11.0 sqlserver-se-12.0 sqlserver-se-13.0 sqlserver-se-14.0 sqlserver-se-15.0 sqlserver-web-11.0 sqlserver-web-12.0 sqlserver-web-13.0 sqlserver-web-14.0 sqlserver-web-15.0 public let dbParameterGroupFamily: String? - /// This parameter isn't currently supported. + /// A filter that specifies one or more parameters to describe. The only supported filter is parameter-name. The results list only includes information about the parameters with these names. @OptionalCustomCoding> public var filters: [Filter]? /// An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -9128,7 +9128,7 @@ extension RDS { public let allowMajorVersionUpgrade: Bool? /// Specifies whether the modifications in this request are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is disabled, changes to the DB cluster are applied during the next maintenance window. Most modifications can be applied immediately or during the next scheduled maintenance window. Some modifications, such as turning on deletion protection and changing the master password, are applied immediately—regardless of when you choose to apply them. By default, this parameter is disabled. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let applyImmediately: Bool? - /// Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Multi-AZ DB clusters only + /// Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let autoMinorVersionUpgrade: Bool? /// The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup. public let awsBackupRecoveryPointArn: String? @@ -9142,7 +9142,7 @@ extension RDS { public let cloudwatchLogsExportConfiguration: CloudwatchLogsExportConfiguration? /// Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let copyTagsToSnapshot: Bool? - /// Specifies the mode of Database Insights to enable for the cluster. + /// Specifies the mode of Database Insights to enable for the DB cluster. If you change the value from standard to advanced, you must set the PerformanceInsightsEnabled parameter to true and the PerformanceInsightsRetentionPeriod parameter to 465. 
If you change the value from advanced to standard, you must set the PerformanceInsightsEnabled parameter to false. Valid for Cluster Type: Aurora DB clusters only public let databaseInsightsMode: DatabaseInsightsMode? /// The DB cluster identifier for the cluster being modified. This parameter isn't case-sensitive. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must match the identifier of an existing DB cluster. public let dbClusterIdentifier: String? @@ -9192,9 +9192,9 @@ extension RDS { public let newDBClusterIdentifier: String? /// The option group to associate the DB cluster with. DB clusters are associated with a default option group that can't be modified. public let optionGroupName: String? - /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only + /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let performanceInsightsKMSKeyId: String? - /// The number of days to retain Performance Insights data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. + /// The number of days to retain Performance Insights data. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. public let performanceInsightsRetentionPeriod: Int? /// The port number on which the DB cluster accepts connections. Valid for Cluster Type: Aurora DB clusters only Valid Values: 1150-65535 Default: The same port as the original DB cluster. public let port: Int? @@ -9420,11 +9420,11 @@ extension RDS { public let caCertificateIdentifier: String? /// Specifies whether the DB instance is restarted when you rotate your SSL/TLS certificate. By default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted. Set this parameter only if you are not using SSL/TLS to connect to the DB instance. 
If you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate: For more information about rotating your SSL/TLS certificate for RDS DB engines, see Rotating Your SSL/TLS Certificate. in the Amazon RDS User Guide. For more information about rotating your SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS Certificate in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. public let certificateRotationRestart: Bool? - /// The log types to be enabled for export to CloudWatch Logs for a specific DB instance. A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect. This setting doesn't apply to RDS Custom DB instances. + /// The log types to be enabled for export to CloudWatch Logs for a specific DB instance. A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect. This setting doesn't apply to RDS Custom DB instances. The following values are valid for each DB engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - postgresql | upgrade For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. public let cloudwatchLogsExportConfiguration: CloudwatchLogsExportConfiguration? /// Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags aren't copied. This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see ModifyDBCluster. public let copyTagsToSnapshot: Bool? - /// Specifies the mode of Database Insights to enable for the instance. + /// Specifies the mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. Currently, this value is inherited from the DB cluster and can't be changed. public let databaseInsightsMode: DatabaseInsightsMode? /// The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide. For RDS Custom, see DB instance class support for RDS Custom for Oracle and DB instance class support for RDS Custom for SQL Server. If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless you specify ApplyImmediately in your request. Default: Uses existing setting Constraints: If you are modifying the DB instance class and upgrading the engine version at the same time, the currently running engine version must be supported on the specified DB instance class. Otherwise, the operation returns an error. 
In this case, first run the operation to upgrade the engine version, and then run it again to modify the DB instance class. public let dbInstanceClass: String? @@ -9468,7 +9468,7 @@ extension RDS { public let engine: String? /// The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family. If you specify only a major version, Amazon RDS updates the DB instance to the default minor version if the current minor version is lower. For information about valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions. If the instance that you're modifying is acting as a read replica, the engine version that you specify must be the same or higher than the version that the source DB instance or cluster is running. In RDS Custom for Oracle, this parameter is supported for read replicas only if they are in the PATCH_DB_FAILURE lifecycle. Constraints: If you are upgrading the engine version and modifying the DB instance class at the same time, the currently running engine version must be supported on the specified DB instance class. Otherwise, the operation returns an error. In this case, first run the operation to upgrade the engine version, and then run it again to modify the DB instance class. public let engineVersion: String? - /// The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect. If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance. Constraints: For RDS for MariaDB, RDS for MySQL, RDS for Oracle, and RDS for PostgreSQL - The value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. When you increase the Provisioned IOPS, you must also specify the AllocatedStorage parameter. You can use the current value for AllocatedStorage. 
Default: Uses existing setting + /// The new Provisioned IOPS (I/O operations per second) value for the RDS instance. Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect. If you choose to migrate your DB instance from using standard storage to Provisioned IOPS (io1), or from Provisioned IOPS to standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance. Constraints: For RDS for MariaDB, RDS for MySQL, RDS for Oracle, and RDS for PostgreSQL - The value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value. When you increase the Provisioned IOPS, you must also specify the AllocatedStorage parameter. You can use the current value for AllocatedStorage. Default: Uses existing setting public let iops: Int? /// The license model for the DB instance. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license public let licenseModel: String? @@ -9517,7 +9517,7 @@ extension RDS { public let rotateMasterUserPassword: Bool? /// The storage throughput value for the DB instance. This setting applies only to the gp3 storage type. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. public let storageThroughput: Int? - /// The storage type to associate with the DB instance. If you specify io1, io2, or gp3 you must also include a value for the Iops parameter. If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. 
No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance. Valid Values: gp2 | gp3 | io1 | io2 | standard Default: io1, if the Iops parameter is specified. Otherwise, gp2. + /// The storage type to associate with the DB instance. If you specify io1, io2, or gp3 you must also include a value for the Iops parameter. If you choose to migrate your DB instance from using standard storage to gp2 (General Purpose SSD), gp3, or Provisioned IOPS (io1), or from these storage types to standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating a DB snapshot of the instance. Valid Values: gp2 | gp3 | io1 | io2 | standard Default: io1, if the Iops parameter is specified. Otherwise, gp2. public let storageType: String? /// The ARN from the key store with which to associate the instance for TDE encryption. This setting doesn't apply to RDS Custom DB instances. public let tdeCredentialArn: String? @@ -12280,7 +12280,7 @@ extension RDS { public let enablePerformanceInsights: Bool? /// The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support public let engineLifecycleSupport: String? - /// The engine mode of the new cluster. Specify provisioned or serverless, depending on the type of the cluster you are creating. You can create an Aurora Serverless v1 clone from a provisioned cluster, or a provisioned clone from an Aurora Serverless v1 cluster. To create a clone that is an Aurora Serverless v1 cluster, the original cluster must be an Aurora Serverless v1 cluster or an encrypted provisioned cluster. Valid for: Aurora DB clusters only + /// The engine mode of the new cluster. 
Specify provisioned or serverless, depending on the type of the cluster you are creating. You can create an Aurora Serverless v1 clone from a provisioned cluster, or a provisioned clone from an Aurora Serverless v1 cluster. To create a clone that is an Aurora Serverless v1 cluster, the original cluster must be an Aurora Serverless v1 cluster or an encrypted provisioned cluster. To create a full copy that is an Aurora Serverless v1 cluster, specify the engine mode serverless. Valid for: Aurora DB clusters only public let engineMode: String? /// The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. For information about valid IOPS values, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB instance. Valid for: Multi-AZ DB clusters only public let iops: Int? @@ -12421,7 +12421,7 @@ extension RDS { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } public struct _VpcSecurityGroupIdsEncoding: ArrayCoderProperties { public static let member = "VpcSecurityGroupId" } - /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. public let allocatedStorage: Int? /// Specifies whether to automatically apply minor version upgrades to the DB instance during the maintenance window. If you restore an RDS Custom DB instance, you must disable this parameter. public let autoMinorVersionUpgrade: Bool? @@ -12621,7 +12621,7 @@ extension RDS { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } public struct _VpcSecurityGroupIdsEncoding: ArrayCoderProperties { public static let member = "VpcSecurityGroupId" } - /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. public let allocatedStorage: Int? /// Specifies whether to automatically apply minor engine upgrades to the DB instance during the maintenance window. By default, minor engine upgrades are not applied automatically. public let autoMinorVersionUpgrade: Bool? @@ -12633,7 +12633,7 @@ extension RDS { public let caCertificateIdentifier: String? /// Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. public let copyTagsToSnapshot: Bool? 
- /// Specifies the mode of Database Insights to enable for the instance. + /// Specifies the mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. Currently, this value is inherited from the DB cluster and can't be changed. public let databaseInsightsMode: DatabaseInsightsMode? /// The compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Importing from Amazon S3 isn't supported on the db.t2.micro DB instance class. public let dbInstanceClass: String? @@ -12861,7 +12861,7 @@ extension RDS { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } public struct _VpcSecurityGroupIdsEncoding: ArrayCoderProperties { public static let member = "VpcSecurityGroupId" } - /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. public let allocatedStorage: Int? /// Specifies whether minor version upgrades are applied automatically to the DB instance during the maintenance window. This setting doesn't apply to RDS Custom. public let autoMinorVersionUpgrade: Bool? 
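The ModifyDBCluster hunks above tie databaseInsightsMode to Performance Insights: moving from standard to advanced requires Performance Insights to be enabled with a 465-day retention period. A hedged sketch of a conforming request, run inside an async context; the cluster identifier is hypothetical and the enum case name is assumed from the DatabaseInsightsMode raw values:

import SotoRDS

let client = AWSClient()
let rds = RDS(client: client, region: .useast1)

// Advanced Database Insights requires Performance Insights with
// the documented 465-day retention, per the doc comments above.
let request = RDS.ModifyDBClusterMessage(
    applyImmediately: true,
    databaseInsightsMode: .advanced,           // assumed case for "advanced"
    dbClusterIdentifier: "my-aurora-cluster",  // hypothetical cluster
    enablePerformanceInsights: true,
    performanceInsightsRetentionPeriod: 465
)
let result = try await rds.modifyDBCluster(request)
print(result.dbCluster?.status ?? "unknown")
try await client.shutdown()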
diff --git a/Sources/Soto/Services/Rbin/Rbin_api.swift b/Sources/Soto/Services/Rbin/Rbin_api.swift index 88be5e729e..67847cb181 100644 --- a/Sources/Soto/Services/Rbin/Rbin_api.swift +++ b/Sources/Soto/Services/Rbin/Rbin_api.swift @@ -79,6 +79,52 @@ public struct Rbin: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "rbin.af-south-1.api.aws", + "ap-east-1": "rbin.ap-east-1.api.aws", + "ap-northeast-1": "rbin.ap-northeast-1.api.aws", + "ap-northeast-2": "rbin.ap-northeast-2.api.aws", + "ap-northeast-3": "rbin.ap-northeast-3.api.aws", + "ap-south-1": "rbin.ap-south-1.api.aws", + "ap-south-2": "rbin.ap-south-2.api.aws", + "ap-southeast-1": "rbin.ap-southeast-1.api.aws", + "ap-southeast-2": "rbin.ap-southeast-2.api.aws", + "ap-southeast-3": "rbin.ap-southeast-3.api.aws", + "ap-southeast-4": "rbin.ap-southeast-4.api.aws", + "ap-southeast-5": "rbin.ap-southeast-5.api.aws", + "ca-central-1": "rbin.ca-central-1.api.aws", + "ca-west-1": "rbin.ca-west-1.api.aws", + "cn-north-1": "rbin.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "rbin.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "rbin.eu-central-1.api.aws", + "eu-central-2": "rbin.eu-central-2.api.aws", + "eu-north-1": "rbin.eu-north-1.api.aws", + "eu-south-1": "rbin.eu-south-1.api.aws", + "eu-south-2": "rbin.eu-south-2.api.aws", + "eu-west-1": "rbin.eu-west-1.api.aws", + "eu-west-2": "rbin.eu-west-2.api.aws", + "eu-west-3": "rbin.eu-west-3.api.aws", + "il-central-1": "rbin.il-central-1.api.aws", + "me-central-1": "rbin.me-central-1.api.aws", + "me-south-1": "rbin.me-south-1.api.aws", + "sa-east-1": "rbin.sa-east-1.api.aws", + "us-east-1": "rbin.us-east-1.api.aws", + "us-east-2": "rbin.us-east-2.api.aws", + "us-gov-east-1": "rbin.us-gov-east-1.api.aws", + "us-gov-west-1": "rbin.us-gov-west-1.api.aws", + "us-west-1": "rbin.us-west-1.api.aws", + "us-west-2": "rbin.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "rbin-fips.ca-central-1.api.aws", + "ca-west-1": "rbin-fips.ca-west-1.api.aws", + "us-east-1": "rbin-fips.us-east-1.api.aws", + "us-east-2": "rbin-fips.us-east-2.api.aws", + "us-gov-east-1": "rbin-fips.us-gov-east-1.api.aws", + "us-gov-west-1": "rbin-fips.us-gov-west-1.api.aws", + "us-west-1": "rbin-fips.us-west-1.api.aws", + "us-west-2": "rbin-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ "ca-central-1": "rbin-fips.ca-central-1.amazonaws.com", "ca-west-1": "rbin-fips.ca-west-1.amazonaws.com", diff --git a/Sources/Soto/Services/Redshift/Redshift_api.swift b/Sources/Soto/Services/Redshift/Redshift_api.swift index 47142d7769..5e47f056c1 100644 --- a/Sources/Soto/Services/Redshift/Redshift_api.swift +++ b/Sources/Soto/Services/Redshift/Redshift_api.swift @@ -570,7 +570,7 @@ public struct Redshift: AWSService { /// - dbName: The name of the first database to be created when the cluster is created. To create additional databases after the cluster is created, connect to the cluster with a SQL client and use SQL commands to create a database. For more information, go to Create a Database in the Amazon Redshift Database Developer Guide. Default: dev Constraints: Must contain 1 to 64 alphanumeric characters. Must contain only lowercase letters. Cannot be a word that is reserved by the service. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide. 
/// - defaultIamRoleArn: The Amazon Resource Name (ARN) for the IAM role that was set as default for the cluster when the cluster was created. /// - elasticIp: The Elastic IP (EIP) address for the cluster. Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible through an Internet gateway. Don't specify the Elastic IP address for a publicly accessible cluster with availability zone relocation turned on. For more information about provisioning clusters in EC2-VPC, go to Supported Platforms to Launch Your Cluster in the Amazon Redshift Cluster Management Guide. - /// - encrypted: If true, the data in the cluster is encrypted at rest. Default: false + /// - encrypted: If true, the data in the cluster is encrypted at rest. If you set the value on this parameter to false, the request will fail. Default: true /// - enhancedVpcRouting: An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide. If this option is true, enhanced VPC routing is enabled. Default: false /// - hsmClientCertificateIdentifier: Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM. /// - hsmConfigurationIdentifier: Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM. @@ -589,7 +589,7 @@ public struct Redshift: AWSService { /// - numberOfNodes: The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. For information about determining how many nodes you need, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. If you don't specify this parameter, you get a single-node cluster. When requesting a multi-node cluster, you must specify the number of nodes that you want in the cluster. Default: 1 Constraints: Value must be at least 1 and no more than 100. /// - port: The port number on which the cluster accepts incoming connections. The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. Default: 5439 Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.) For clusters with dc2 nodes - Select a port within the range 1150-65535. /// - preferredMaintenanceWindow: The weekly time range (in UTC) during which automated cluster maintenance can occur. Format: ddd:hh24:mi-ddd:hh24:mi Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Minimum 30-minute window. - /// - publiclyAccessible: If true, the cluster can be accessed from a public network. + /// - publiclyAccessible: If true, the cluster can be accessed from a public network. Default: false /// - redshiftIdcApplicationArn: The Amazon resource name (ARN) of the Amazon Redshift IAM Identity Center application. 
/// - snapshotScheduleIdentifier: A unique identifier for the snapshot schedule. /// - tags: A list of tag instances. @@ -4240,7 +4240,7 @@ public struct Redshift: AWSService { /// - numberOfNodes: The new number of nodes of the cluster. If you specify a new number of nodes, you must also specify the node type parameter. /// - port: The option to change the port of an Amazon Redshift cluster. Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.) For clusters with dc2 nodes - Select a port within the range 1150-65535. /// - preferredMaintenanceWindow: The weekly time range (in UTC) during which system maintenance can occur, if necessary. If system maintenance is necessary during the window, it may result in an outage. This maintenance window change is made immediately. If the new maintenance window indicates the current time, there must be at least 120 minutes between the current time and end of the window in order to ensure that pending changes are applied. Default: Uses existing setting. Format: ddd:hh24:mi-ddd:hh24:mi, for example wed:07:30-wed:08:00. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes. - /// - publiclyAccessible: If true, the cluster can be accessed from a public network. Only clusters in VPCs can be set to be publicly available. + /// - publiclyAccessible: If true, the cluster can be accessed from a public network. Only clusters in VPCs can be set to be publicly available. Default: false /// - vpcSecurityGroupIds: A list of virtual private cloud (VPC) security groups to be associated with the cluster. This change is asynchronously applied as soon as possible. /// - logger: Logger use during operation @inlinable @@ -5240,7 +5240,7 @@ public struct Redshift: AWSService { /// - ownerAccount: The Amazon Web Services account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot. /// - port: The port number on which the cluster accepts connections. Default: The same port as the original cluster. Valid values: For clusters with DC2 nodes, must be within the range 1150-65535. For clusters with ra3 nodes, must be within the ranges 5431-5455 or 8191-8215. /// - preferredMaintenanceWindow: The weekly time range (in UTC) during which automated cluster maintenance can occur. Format: ddd:hh24:mi-ddd:hh24:mi Default: The value selected for the cluster from which the snapshot was taken. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Minimum 30-minute window. - /// - publiclyAccessible: If true, the cluster can be accessed from a public network. + /// - publiclyAccessible: If true, the cluster can be accessed from a public network. Default: false /// - reservedNodeId: The identifier of the target reserved node offering. /// - snapshotArn: The Amazon Resource Name (ARN) of the snapshot associated with the message to restore from a cluster. You must specify this parameter or snapshotIdentifier, but not both. /// - snapshotClusterIdentifier: The name of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name. 
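Two behavioral changes in the CreateCluster documentation above are easy to miss: encrypted now defaults to true and an explicit false fails the request, while publiclyAccessible defaults to false. A short sketch under those documented defaults, run inside an async context; every identifier and credential below is a placeholder:

import SotoRedshift

let client = AWSClient()
let redshift = Redshift(client: client, region: .uswest2)

// `encrypted` is deliberately omitted: it now defaults to true, and
// passing false makes the request fail per the updated documentation.
let request = Redshift.CreateClusterMessage(
    clusterIdentifier: "example-cluster",
    dbName: "dev",
    masterUsername: "awsuser",
    masterUserPassword: "example-Password-1",  // placeholder credential
    nodeType: "ra3.xlplus",
    numberOfNodes: 2,
    publiclyAccessible: false                  // states the documented default explicitly
)
let result = try await redshift.createCluster(request)
print(result.cluster?.clusterStatus ?? "unknown")
try await client.shutdown()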
diff --git a/Sources/Soto/Services/Redshift/Redshift_shapes.swift b/Sources/Soto/Services/Redshift/Redshift_shapes.swift index 2f205b8ab3..2eb60d5d2b 100644 --- a/Sources/Soto/Services/Redshift/Redshift_shapes.swift +++ b/Sources/Soto/Services/Redshift/Redshift_shapes.swift @@ -1002,7 +1002,7 @@ extension Redshift { public let pendingModifiedValues: PendingModifiedValues? /// The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur. public let preferredMaintenanceWindow: String? - /// A boolean value that, if true, indicates that the cluster can be accessed from a public network. + /// A boolean value that, if true, indicates that the cluster can be accessed from a public network. Default: false public let publiclyAccessible: Bool? /// The status of the reserved-node exchange request. Statuses include in-progress and requested. public let reservedNodeExchangeStatus: ReservedNodeExchangeStatus? @@ -1800,7 +1800,7 @@ extension Redshift { public let defaultIamRoleArn: String? /// The Elastic IP (EIP) address for the cluster. Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible through an Internet gateway. Don't specify the Elastic IP address for a publicly accessible cluster with availability zone relocation turned on. For more information about provisioning clusters in EC2-VPC, go to Supported Platforms to Launch Your Cluster in the Amazon Redshift Cluster Management Guide. public let elasticIp: String? - /// If true, the data in the cluster is encrypted at rest. Default: false + /// If true, the data in the cluster is encrypted at rest. If you set the value on this parameter to false, the request will fail. Default: true public let encrypted: Bool? /// An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see Enhanced VPC Routing in the Amazon Redshift Cluster Management Guide. If this option is true, enhanced VPC routing is enabled. Default: false public let enhancedVpcRouting: Bool? @@ -1839,7 +1839,7 @@ extension Redshift { public let port: Int? /// The weekly time range (in UTC) during which automated cluster maintenance can occur. Format: ddd:hh24:mi-ddd:hh24:mi Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Minimum 30-minute window. public let preferredMaintenanceWindow: String? - /// If true, the cluster can be accessed from a public network. + /// If true, the cluster can be accessed from a public network. Default: false public let publiclyAccessible: Bool? /// The Amazon resource name (ARN) of the Amazon Redshift IAM Identity Center application. public let redshiftIdcApplicationArn: String? @@ -6764,7 +6764,7 @@ extension Redshift { public let port: Int? /// The weekly time range (in UTC) during which system maintenance can occur, if necessary. If system maintenance is necessary during the window, it may result in an outage. This maintenance window change is made immediately. If the new maintenance window indicates the current time, there must be at least 120 minutes between the current time and end of the window in order to ensure that pending changes are applied. Default: Uses existing setting. 
Format: ddd:hh24:mi-ddd:hh24:mi, for example wed:07:30-wed:08:00. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes. public let preferredMaintenanceWindow: String? - /// If true, the cluster can be accessed from a public network. Only clusters in VPCs can be set to be publicly available. + /// If true, the cluster can be accessed from a public network. Only clusters in VPCs can be set to be publicly available. Default: false public let publiclyAccessible: Bool? /// A list of virtual private cloud (VPC) security groups to be associated with the cluster. This change is asynchronously applied as soon as possible. @OptionalCustomCoding<ArrayCoder<_VpcSecurityGroupIdsEncoding, String>> @@ -8660,7 +8660,7 @@ extension Redshift { public let port: Int? /// The weekly time range (in UTC) during which automated cluster maintenance can occur. Format: ddd:hh24:mi-ddd:hh24:mi Default: The value selected for the cluster from which the snapshot was taken. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Minimum 30-minute window. public let preferredMaintenanceWindow: String? - /// If true, the cluster can be accessed from a public network. + /// If true, the cluster can be accessed from a public network. Default: false public let publiclyAccessible: Bool? /// The identifier of the target reserved node offering. public let reservedNodeId: String? diff --git a/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift b/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift index ba9cdb0658..07a86aa082 100644 --- a/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift +++ b/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift @@ -79,6 +79,34 @@ public struct Resiliencehub: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "resiliencehub.af-south-1.api.aws", + "ap-east-1": "resiliencehub.ap-east-1.api.aws", + "ap-northeast-1": "resiliencehub.ap-northeast-1.api.aws", + "ap-northeast-2": "resiliencehub.ap-northeast-2.api.aws", + "ap-south-1": "resiliencehub.ap-south-1.api.aws", + "ap-southeast-1": "resiliencehub.ap-southeast-1.api.aws", + "ap-southeast-2": "resiliencehub.ap-southeast-2.api.aws", + "ca-central-1": "resiliencehub.ca-central-1.api.aws", + "eu-central-1": "resiliencehub.eu-central-1.api.aws", + "eu-north-1": "resiliencehub.eu-north-1.api.aws", + "eu-south-1": "resiliencehub.eu-south-1.api.aws", + "eu-west-1": "resiliencehub.eu-west-1.api.aws", + "eu-west-2": "resiliencehub.eu-west-2.api.aws", + "eu-west-3": "resiliencehub.eu-west-3.api.aws", + "me-south-1": "resiliencehub.me-south-1.api.aws", + "sa-east-1": "resiliencehub.sa-east-1.api.aws", + "us-east-1": "resiliencehub.us-east-1.api.aws", + "us-east-2": "resiliencehub.us-east-2.api.aws", + "us-gov-east-1": "resiliencehub.us-gov-east-1.api.aws", + "us-gov-west-1": "resiliencehub.us-gov-west-1.api.aws", + "us-west-1": "resiliencehub.us-west-1.api.aws", + "us-west-2": "resiliencehub.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "us-gov-east-1": "resiliencehub-fips.us-gov-east-1.api.aws", + "us-gov-west-1": "resiliencehub-fips.us-gov-west-1.api.aws" + ]), [.fips]: .init(endpoints: [ "us-gov-east-1": "resiliencehub-fips.us-gov-east-1.amazonaws.com", "us-gov-west-1": "resiliencehub-fips.us-gov-west-1.amazonaws.com" diff --git
a/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift b/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift index 050d686ce2..39679ae19a 100644 --- a/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift +++ b/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift @@ -471,6 +471,24 @@ extension Resiliencehub { } } + public struct Alarm: AWSDecodableShape { + /// Amazon Resource Name (ARN) of the Amazon CloudWatch alarm. + public let alarmArn: String? + /// Indicates the source of the Amazon CloudWatch alarm. That is, it indicates if the alarm was created using Resilience Hub recommendation (AwsResilienceHub), or if you had created the alarm in Amazon CloudWatch (Customer). + public let source: String? + + @inlinable + public init(alarmArn: String? = nil, source: String? = nil) { + self.alarmArn = alarmArn + self.source = source + } + + private enum CodingKeys: String, CodingKey { + case alarmArn = "alarmArn" + case source = "source" + } + } + public struct AlarmRecommendation: AWSDecodableShape { /// Application Component name for the CloudWatch alarm recommendation. This name is saved as the first item in the appComponentNames list. public let appComponentName: String? @@ -976,7 +994,7 @@ extension Resiliencehub { } public struct AssessmentRiskRecommendation: AWSDecodableShape { - /// Indicates the Application Components (AppComponents) that were assessed as part of the assessnent and are associated with the identified risk and recommendation. This property is available only in the US East (N. Virginia) Region. + /// Indicates the Application Components (AppComponents) that were assessed as part of the assessment and are associated with the identified risk and recommendation. This property is available only in the US East (N. Virginia) Region. public let appComponents: [String]? /// Indicates the recommendation provided by the Resilience Hub to address the identified risks in the application. This property is available only in the US East (N. Virginia) Region. public let recommendation: String? @@ -1087,6 +1105,8 @@ extension Resiliencehub { } public struct BatchUpdateRecommendationStatusSuccessfulEntry: AWSDecodableShape { + /// Indicates the identifier of an AppComponent. + public let appComponentId: String? /// An identifier for an entry in this batch that is used to communicate the result. The entryIds of a batch request need to be unique within a request. public let entryId: String /// Indicates if the operational recommendation was successfully excluded. @@ -1099,7 +1119,8 @@ extension Resiliencehub { public let referenceId: String @inlinable - public init(entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? = nil, item: UpdateRecommendationStatusItem? = nil, referenceId: String) { + public init(appComponentId: String? = nil, entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? = nil, item: UpdateRecommendationStatusItem? = nil, referenceId: String) { + self.appComponentId = appComponentId self.entryId = entryId self.excluded = excluded self.excludeReason = excludeReason @@ -1108,6 +1129,7 @@ extension Resiliencehub { } private enum CodingKeys: String, CodingKey { + case appComponentId = "appComponentId" case entryId = "entryId" case excluded = "excluded" case excludeReason = "excludeReason" @@ -2792,6 +2814,24 @@ extension Resiliencehub { } } + public struct Experiment: AWSDecodableShape { + /// Amazon Resource Name (ARN) of the FIS experiment. + public let experimentArn: String? 
+ /// Identifier of the FIS experiment template. + public let experimentTemplateId: String? + + @inlinable + public init(experimentArn: String? = nil, experimentTemplateId: String? = nil) { + self.experimentArn = experimentArn + self.experimentTemplateId = experimentTemplateId + } + + private enum CodingKeys: String, CodingKey { + case experimentArn = "experimentArn" + case experimentTemplateId = "experimentTemplateId" + } + } + public struct FailedGroupingRecommendationEntry: AWSDecodableShape { /// Indicates the error that occurred while implementing a grouping recommendation. public let errorMessage: String @@ -4281,7 +4321,7 @@ extension Resiliencehub { public struct PermissionModel: AWSEncodableShape & AWSDecodableShape { /// Defines a list of role Amazon Resource Names (ARNs) to be used in other accounts. These ARNs are used for querying purposes while importing resources and assessing your application. These ARNs are required only when your resources are in other accounts and you have different role name in these accounts. Else, the invoker role name will be used in the other accounts. These roles must have a trust policy with iam:AssumeRole permission to the invoker role in the primary account. public let crossAccountRoleArns: [String]? - /// Existing Amazon Web Services IAM role name in the primary Amazon Web Services account that will be assumed by Resilience Hub Service Principle to obtain a read-only access to your application resources while running an assessment. You must have iam:passRole permission for this role while creating or updating the application. Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-] characters. + /// Existing Amazon Web Services IAM role name in the primary Amazon Web Services account that will be assumed by Resilience Hub Service Principle to obtain a read-only access to your application resources while running an assessment. If your IAM role includes a path, you must include the path in the invokerRoleName parameter. For example, if your IAM role's ARN is arn:aws:iam:123456789012:role/my-path/role-name, you should pass my-path/role-name. You must have iam:passRole permission for this role while creating or updating the application. Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-] characters. public let invokerRoleName: String? /// Defines how Resilience Hub scans your resources. It can scan for the resources by using a pre-existing role in your Amazon Web Services account, or by using the credentials of the current IAM user. public let type: PermissionModelType @@ -4519,10 +4559,14 @@ extension Resiliencehub { public struct RecommendationItem: AWSDecodableShape { /// Specifies if the recommendation has already been implemented. public let alreadyImplemented: Bool? + /// Indicates the previously implemented Amazon CloudWatch alarm discovered by Resilience Hub. + public let discoveredAlarm: Alarm? /// Indicates if an operational recommendation item is excluded. public let excluded: Bool? /// Indicates the reason for excluding an operational recommendation. public let excludeReason: ExcludeRecommendationReason? + /// Indicates the experiment created in FIS that was discovered by Resilience Hub, which matches the recommendation. + public let latestDiscoveredExperiment: Experiment? /// Identifier of the resource. public let resourceId: String? /// Identifier of the target account. @@ -4531,10 +4575,12 @@ extension Resiliencehub { public let targetRegion: String? @inlinable - public init(alreadyImplemented: Bool? = nil, excluded: Bool? 
= nil, excludeReason: ExcludeRecommendationReason? = nil, resourceId: String? = nil, targetAccountId: String? = nil, targetRegion: String? = nil) { + public init(alreadyImplemented: Bool? = nil, discoveredAlarm: Alarm? = nil, excluded: Bool? = nil, excludeReason: ExcludeRecommendationReason? = nil, latestDiscoveredExperiment: Experiment? = nil, resourceId: String? = nil, targetAccountId: String? = nil, targetRegion: String? = nil) { self.alreadyImplemented = alreadyImplemented + self.discoveredAlarm = discoveredAlarm self.excluded = excluded self.excludeReason = excludeReason + self.latestDiscoveredExperiment = latestDiscoveredExperiment self.resourceId = resourceId self.targetAccountId = targetAccountId self.targetRegion = targetRegion @@ -4542,8 +4588,10 @@ extension Resiliencehub { private enum CodingKeys: String, CodingKey { case alreadyImplemented = "alreadyImplemented" + case discoveredAlarm = "discoveredAlarm" case excluded = "excluded" case excludeReason = "excludeReason" + case latestDiscoveredExperiment = "latestDiscoveredExperiment" case resourceId = "resourceId" case targetAccountId = "targetAccountId" case targetRegion = "targetRegion" @@ -5379,6 +5427,8 @@ extension Resiliencehub { } public struct TestRecommendation: AWSDecodableShape { + /// Indicates the identifier of the AppComponent. + public let appComponentId: String? /// Name of the Application Component. public let appComponentName: String? /// A list of recommended alarms that are used in the test and must be exported before or with the test. @@ -5405,7 +5455,8 @@ extension Resiliencehub { public let type: TestType? @inlinable - public init(appComponentName: String? = nil, dependsOnAlarms: [String]? = nil, description: String? = nil, intent: String? = nil, items: [RecommendationItem]? = nil, name: String? = nil, prerequisite: String? = nil, recommendationId: String? = nil, recommendationStatus: RecommendationStatus? = nil, referenceId: String, risk: TestRisk? = nil, type: TestType? = nil) { + public init(appComponentId: String? = nil, appComponentName: String? = nil, dependsOnAlarms: [String]? = nil, description: String? = nil, intent: String? = nil, items: [RecommendationItem]? = nil, name: String? = nil, prerequisite: String? = nil, recommendationId: String? = nil, recommendationStatus: RecommendationStatus? = nil, referenceId: String, risk: TestRisk? = nil, type: TestType? = nil) { + self.appComponentId = appComponentId self.appComponentName = appComponentName self.dependsOnAlarms = dependsOnAlarms self.description = description @@ -5421,6 +5472,7 @@ extension Resiliencehub { } private enum CodingKeys: String, CodingKey { + case appComponentId = "appComponentId" case appComponentName = "appComponentName" case dependsOnAlarms = "dependsOnAlarms" case description = "description" @@ -5817,6 +5869,8 @@ extension Resiliencehub { } public struct UpdateRecommendationStatusRequestEntry: AWSEncodableShape { + /// Indicates the identifier of the AppComponent. + public let appComponentId: String? /// An identifier for an entry in this batch that is used to communicate the result. The entryIds of a batch request need to be unique within a request. public let entryId: String /// Indicates if the operational recommendation needs to be excluded. If set to True, the operational recommendation will be excluded. @@ -5829,7 +5883,8 @@ extension Resiliencehub { public let referenceId: String @inlinable - public init(entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? 
= nil, item: UpdateRecommendationStatusItem? = nil, referenceId: String) { + public init(appComponentId: String? = nil, entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? = nil, item: UpdateRecommendationStatusItem? = nil, referenceId: String) { + self.appComponentId = appComponentId self.entryId = entryId self.excluded = excluded self.excludeReason = excludeReason @@ -5838,6 +5893,7 @@ extension Resiliencehub { } public func validate(name: String) throws { + try self.validate(self.appComponentId, name: "appComponentId", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9_\\-]{0,254}$") try self.validate(self.entryId, name: "entryId", parent: name, max: 255) try self.validate(self.entryId, name: "entryId", parent: name, min: 1) try self.item?.validate(name: "\(name).item") @@ -5846,6 +5902,7 @@ extension Resiliencehub { } private enum CodingKeys: String, CodingKey { + case appComponentId = "appComponentId" case entryId = "entryId" case excluded = "excluded" case excludeReason = "excludeReason" diff --git a/Sources/Soto/Services/Route53/Route53_shapes.swift b/Sources/Soto/Services/Route53/Route53_shapes.swift index 698dd45a4d..cba9604dd2 100644 --- a/Sources/Soto/Services/Route53/Route53_shapes.swift +++ b/Sources/Soto/Services/Route53/Route53_shapes.swift @@ -67,6 +67,7 @@ extension Route53 { case apSoutheast3 = "ap-southeast-3" case apSoutheast4 = "ap-southeast-4" case apSoutheast5 = "ap-southeast-5" + case apSoutheast7 = "ap-southeast-7" case caCentral1 = "ca-central-1" case caWest1 = "ca-west-1" case cnNorth1 = "cn-north-1" @@ -82,6 +83,7 @@ extension Route53 { case ilCentral1 = "il-central-1" case meCentral1 = "me-central-1" case meSouth1 = "me-south-1" + case mxCentral1 = "mx-central-1" case saEast1 = "sa-east-1" case usEast1 = "us-east-1" case usEast2 = "us-east-2" @@ -193,6 +195,7 @@ extension Route53 { case apSoutheast3 = "ap-southeast-3" case apSoutheast4 = "ap-southeast-4" case apSoutheast5 = "ap-southeast-5" + case apSoutheast7 = "ap-southeast-7" case caCentral1 = "ca-central-1" case caWest1 = "ca-west-1" case cnNorth1 = "cn-north-1" @@ -208,6 +211,7 @@ extension Route53 { case ilCentral1 = "il-central-1" case meCentral1 = "me-central-1" case meSouth1 = "me-south-1" + case mxCentral1 = "mx-central-1" case saEast1 = "sa-east-1" case usEast1 = "us-east-1" case usEast2 = "us-east-2" @@ -249,6 +253,7 @@ extension Route53 { case apSoutheast3 = "ap-southeast-3" case apSoutheast4 = "ap-southeast-4" case apSoutheast5 = "ap-southeast-5" + case apSoutheast7 = "ap-southeast-7" case caCentral1 = "ca-central-1" case caWest1 = "ca-west-1" case cnNorth1 = "cn-north-1" @@ -264,6 +269,7 @@ extension Route53 { case ilCentral1 = "il-central-1" case meCentral1 = "me-central-1" case meSouth1 = "me-south-1" + case mxCentral1 = "mx-central-1" case saEast1 = "sa-east-1" case usEast1 = "us-east-1" case usEast2 = "us-east-2" diff --git a/Sources/Soto/Services/Route53Domains/Route53Domains_shapes.swift b/Sources/Soto/Services/Route53Domains/Route53Domains_shapes.swift index 54c222223d..095b9aa438 100644 --- a/Sources/Soto/Services/Route53Domains/Route53Domains_shapes.swift +++ b/Sources/Soto/Services/Route53Domains/Route53Domains_shapes.swift @@ -511,7 +511,7 @@ extension Route53Domains { /// Domain Name Format in the Amazon Route 53 Developer /// Guide. public let domainName: String? - /// The ID of the invoice that is associated with the billing record. + /// Deprecated property. This field is retained in report structure for backwards compatibility, but will appear blank. 
public let invoiceId: String? /// The operation that you were charged for. public let operation: OperationType? diff --git a/Sources/Soto/Services/S3/S3_api.swift b/Sources/Soto/Services/S3/S3_api.swift index 879b057112..0352cd5d53 100644 --- a/Sources/Soto/Services/S3/S3_api.swift +++ b/Sources/Soto/Services/S3/S3_api.swift @@ -129,6 +129,7 @@ public struct S3: AWSService { "ap-southeast-3": "s3.ap-southeast-3.amazonaws.com", "ap-southeast-4": "s3.ap-southeast-4.amazonaws.com", "ap-southeast-5": "s3.ap-southeast-5.amazonaws.com", + "ap-southeast-7": "s3.ap-southeast-7.amazonaws.com", "aws-global": "s3.amazonaws.com", "ca-central-1": "s3.ca-central-1.amazonaws.com", "ca-west-1": "s3.ca-west-1.amazonaws.com", @@ -143,6 +144,7 @@ public struct S3: AWSService { "il-central-1": "s3.il-central-1.amazonaws.com", "me-central-1": "s3.me-central-1.amazonaws.com", "me-south-1": "s3.me-south-1.amazonaws.com", + "mx-central-1": "s3.mx-central-1.amazonaws.com", "s3-external-1": "s3-external-1.amazonaws.com", "sa-east-1": "s3.sa-east-1.amazonaws.com", "us-east-1": "s3.us-east-1.amazonaws.com", @@ -169,6 +171,7 @@ public struct S3: AWSService { "ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com", "ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com", "ap-southeast-5": "s3.dualstack.ap-southeast-5.amazonaws.com", + "ap-southeast-7": "s3.dualstack.ap-southeast-7.amazonaws.com", "aws-global": "s3.dualstack.aws-global.amazonaws.com", "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", "ca-west-1": "s3.dualstack.ca-west-1.amazonaws.com", @@ -185,6 +188,7 @@ public struct S3: AWSService { "il-central-1": "s3.dualstack.il-central-1.amazonaws.com", "me-central-1": "s3.dualstack.me-central-1.amazonaws.com", "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", + "mx-central-1": "s3.dualstack.mx-central-1.amazonaws.com", "s3-external-1": "s3.dualstack.s3-external-1.amazonaws.com", "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", @@ -286,14 +290,17 @@ public struct S3: AWSService { /// /// Parameters: /// - bucket: Name of the bucket to which the multipart upload was initiated. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - /// - checksumSHA1: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - /// - checksumSHA256: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC64NVME: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME checksum of the object. The CRC-64NVME checksum is always a full object checksum. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumSHA1: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumSHA256: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumType: This header specifies the checksum type of the object, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. 
You can use this header as a data integrity check to verify that the checksum type that is received is the same checksum that was specified. If the checksum type doesn’t match the checksum type that was specified for the object during the CreateMultipartUpload request, it’ll result in a BadDigest error. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - ifMatch: Uploads the object only if the ETag (entity tag) value provided during the WRITE operation matches the ETag of the object in S3. If the ETag values do not match, the operation returns a 412 Precondition Failed error. If a conflicting operation occurs during the upload S3 returns a 409 ConditionalRequestConflict response. On a 409 failure you should fetch the object's ETag, re-initiate the multipart upload with CreateMultipartUpload, and re-upload each part. Expects the ETag value as a string. For more information about conditional requests, see RFC 7232, or Conditional requests in the Amazon S3 User Guide. /// - ifNoneMatch: Uploads the object only if the object key name does not already exist in the bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. If a conflicting operation occurs during the upload S3 returns a 409 ConditionalRequestConflict response. On a 409 failure you should re-initiate the multipart upload with CreateMultipartUpload and re-upload each part. Expects the '*' (asterisk) character. For more information about conditional requests, see RFC 7232, or Conditional requests in the Amazon S3 User Guide. /// - key: Object key for which the multipart upload was initiated. + /// - mpuObjectSize: The expected total object size of the multipart upload request. If there’s a mismatch between the specified object size value and the actual object size value, it results in an HTTP 400 InvalidRequest error. /// - multipartUpload: The container for the multipart upload request information. /// - requestPayer: /// - sseCustomerAlgorithm: The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is required only when the object was created using a checksum algorithm or if your bucket policy requires the use of SSE-C. For more information, see Protecting data using SSE-C keys in the Amazon S3 User Guide. This functionality is not supported for directory buckets. @@ -306,12 +313,15 @@ public struct S3: AWSService { bucket: String, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, + checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, + checksumType: ChecksumType? = nil, expectedBucketOwner: String? = nil, ifMatch: String? = nil, ifNoneMatch: String? = nil, key: String, + mpuObjectSize: String? = nil, multipartUpload: CompletedMultipartUpload? = nil, requestPayer: RequestPayer? = nil, sseCustomerAlgorithm: String? 
= nil, @@ -324,12 +334,15 @@ public struct S3: AWSService { bucket: bucket, checksumCRC32: checksumCRC32, checksumCRC32C: checksumCRC32C, + checksumCRC64NVME: checksumCRC64NVME, checksumSHA1: checksumSHA1, checksumSHA256: checksumSHA256, + checksumType: checksumType, expectedBucketOwner: expectedBucketOwner, ifMatch: ifMatch, ifNoneMatch: ifNoneMatch, key: key, + mpuObjectSize: mpuObjectSize, multipartUpload: multipartUpload, requestPayer: requestPayer, sseCustomerAlgorithm: sseCustomerAlgorithm, @@ -617,6 +630,7 @@ public struct S3: AWSService { /// - bucketKeyEnabled: Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). General purpose buckets - Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets /// - cacheControl: Specifies caching behavior along the request/reply chain. /// - checksumAlgorithm: Indicates the algorithm that you want Amazon S3 to use to create the checksum for the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumType: Indicates the checksum type that you want Amazon S3 to use to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - contentDisposition: Specifies presentational information for the object. /// - contentEncoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For directory buckets, only the aws-chunked value is supported in this header field. /// - contentLanguage: The language that the content is in. @@ -637,7 +651,7 @@ public struct S3: AWSService { /// - sseCustomerAlgorithm: Specifies the algorithm to use when encrypting the object (for example, AES256). This functionality is not supported for directory buckets. /// - sseCustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header. This functionality is not supported for directory buckets. /// - sseCustomerKeyMD5: Specifies the 128-bit MD5 digest of the customer-provided encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. This functionality is not supported for directory buckets. - /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. 
+ /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. /// - ssekmsKeyId: Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. /// - storageClass: By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide. For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. /// - tagging: The tag-set for the object. The tag-set must be encoded as URL Query parameters. This functionality is not supported for directory buckets. @@ -650,6 +664,7 @@ public struct S3: AWSService { bucketKeyEnabled: Bool? = nil, cacheControl: String? = nil, checksumAlgorithm: ChecksumAlgorithm? = nil, + checksumType: ChecksumType? = nil, contentDisposition: String? = nil, contentEncoding: String? = nil, contentLanguage: String? = nil, @@ -683,6 +698,7 @@ public struct S3: AWSService { bucketKeyEnabled: bucketKeyEnabled, cacheControl: cacheControl, checksumAlgorithm: checksumAlgorithm, + checksumType: checksumType, contentDisposition: contentDisposition, contentEncoding: contentEncoding, contentLanguage: contentLanguage, @@ -736,7 +752,7 @@ public struct S3: AWSService { /// - bucketKeyEnabled: Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using KMS keys (SSE-KMS). S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. 
S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets /// - serverSideEncryption: The server-side encryption algorithm to use when you store objects in the directory bucket. For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). By default, Amazon S3 encrypts data with SSE-S3. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. /// - sessionMode: Specifies the mode of the session that will be created, either ReadWrite or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session is capable of executing all the Zonal endpoint API operations on a directory bucket. A ReadOnly session is constrained to execute the following Zonal endpoint API operations: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, and ListMultipartUploads. - /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. + /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. /// - ssekmsKeyId: If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket.
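Taken together, the completeMultipartUpload changes earlier in this hunk let a caller assert a full-object checksum: checksumType declares how part checksums combine, mpuObjectSize guards the total size, and checksumCRC64NVME carries the value itself. A hedged sketch reusing the s3 client from the earlier example; the .fullObject case name is an assumption about the new ChecksumType enum, and the checksum and IDs are placeholders:

// uploadId would come from an earlier createMultipartUpload call.
let uploadId = "example-upload-id"
_ = try await s3.completeMultipartUpload(
    bucket: "my-bucket",               // placeholder
    checksumCRC64NVME: "AAAAAAAAAAA=", // placeholder Base64 CRC-64NVME value
    checksumType: .fullObject,         // assumed case for the FULL_OBJECT wire value
    key: "my-object",
    mpuObjectSize: "104857600",        // expected total object size in bytes
    multipartUpload: S3.CompletedMultipartUpload(parts: [
        S3.CompletedPart(eTag: "\"etag-1\"", partNumber: 1)  // placeholder part
    ]),
    uploadId: uploadId
)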
/// - logger: Logger use during operation @inlinable @@ -1367,7 +1383,7 @@ public struct S3: AWSService { /// Parameters: /// - bucket: The bucket name containing the objects to delete. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. /// - bypassGovernanceRetention: Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention permission. This functionality is not supported for directory buckets. - /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. + /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. 
Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC-32 CRC-32C CRC-64NVME SHA-1 SHA-256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 fails the request with a BadDigest error. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. /// - delete: Container for the request. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - mfa: The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently delete a versioned object if versioning is configured with MFA delete enabled. When performing the DeleteObjects operation on an MFA delete enabled bucket, which attempts to delete the specified versioned objects, you must include an MFA token. If you don't provide an MFA token, the entire request will fail, even if there are non-versioned objects that you are trying to delete. If you provide an invalid token, whether there are versioned object keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide. This functionality is not supported for directory buckets. @@ -3283,7 +3299,7 @@ public struct S3: AWSService { /// - acl: The canned ACL to apply to the bucket. /// - bucket: The bucket to which to apply the ACL. /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// - contentMD5: The Base64 encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - grantFullControl: Allows grantee the read, write, read ACP, and write ACP permissions on the bucket. 
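A practical consequence of the DeleteObjects wording change above: a pre-computed checksum that disagrees with x-amz-sdk-checksum-algorithm now fails fast with BadDigest instead of being silently reconciled. Naming an algorithm and letting the SDK compute the value sidesteps the mismatch entirely; the bucket and keys below are placeholders:

let delete = S3.Delete(objects: [
    S3.ObjectIdentifier(key: "stale/report-1.csv"),
    S3.ObjectIdentifier(key: "stale/report-2.csv")
])
_ = try await s3.deleteObjects(
    bucket: "my-bucket",         // placeholder
    checksumAlgorithm: .sha256,  // SDK computes the matching x-amz-checksum-sha256
    delete: delete
)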
/// - grantRead: Allows grantee to list the objects in the bucket. @@ -3382,7 +3398,7 @@ public struct S3: AWSService { /// Parameters: /// - bucket: Specifies the bucket impacted by the corsconfiguration. /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// - contentMD5: The Base64 encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. /// - corsConfiguration: Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - logger: Logger use during operation @@ -3433,7 +3449,7 @@ public struct S3: AWSService { /// Parameters: /// - bucket: Specifies default encryption for a bucket using server-side encryption with different key options. Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the server-side encryption configuration. 
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. This functionality is not supported for directory buckets. + /// - contentMD5: The Base64 encoded 128-bit MD5 digest of the server-side encryption configuration. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. This functionality is not supported for directory buckets. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code /// - serverSideEncryptionConfiguration: /// - logger: Logger use during operation @@ -3762,7 +3778,7 @@ public struct S3: AWSService { /// /// Parameters: /// - bucket: The name of the bucket. Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide - /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. + /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC-32 CRC-32C CRC-64NVME SHA-1 SHA-256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 fails the request with a BadDigest error. 
For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. /// - confirmRemoveSelfBucketAccess: Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future. This functionality is not supported for directory buckets. /// - contentMD5: The MD5 hash of the request body. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. This functionality is not supported for directory buckets. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code @@ -3809,7 +3825,7 @@ public struct S3: AWSService { /// Parameters: /// - bucket: The name of the bucket /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// - contentMD5: The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - replicationConfiguration: /// - token: A token to allow Object Lock to be enabled for an existing bucket. @@ -3855,7 +3871,7 @@ public struct S3: AWSService { /// Parameters: /// - bucket: The bucket name. /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the data. 
You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// - contentMD5: The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - requestPaymentConfiguration: Container for Payer. /// - logger: Logger use during operation @@ -3898,7 +3914,7 @@ public struct S3: AWSService { /// Parameters: /// - bucket: The bucket name. /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// - contentMD5: The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - tagging: Container for the TagSet and Tag elements. /// - logger: Logger use during operation @@ -3921,7 +3937,7 @@ public struct S3: AWSService { return try await self.putBucketTagging(input, logger: logger) } - /// This operation is not supported for directory buckets. When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. While this change is propagating, you may encounter intermittent HTTP 404 NoSuchKey errors for requests to objects created or updated after enabling versioning. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT or DELETE) on objects in the bucket. Sets the versioning state of an existing bucket. You can set the versioning state with one of the following values: Enabled—Enables versioning for the objects in the bucket. 
All objects added to the bucket receive a unique version ID. Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null. If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value. In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket. If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning. The following operations are related to PutBucketVersioning: CreateBucket DeleteBucket GetBucketVersioning + /// This operation is not supported for directory buckets. When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. While this change is propagating, you might encounter intermittent HTTP 404 NoSuchKey errors for requests to objects created or updated after enabling versioning. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT or DELETE) on objects in the bucket. Sets the versioning state of an existing bucket. You can set the versioning state with one of the following values: Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID. Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null. If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value. In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket. If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning. The following operations are related to PutBucketVersioning: CreateBucket DeleteBucket GetBucketVersioning @Sendable @inlinable public func putBucketVersioning(_ input: PutBucketVersioningRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -3936,12 +3952,12 @@ public struct S3: AWSService { ) } } - /// This operation is not supported for directory buckets. 
When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. While this change is propagating, you may encounter intermittent HTTP 404 NoSuchKey errors for requests to objects created or updated after enabling versioning. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT or DELETE) on objects in the bucket. Sets the versioning state of an existing bucket. You can set the versioning state with one of the following values: Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID. Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null. If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value. In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket. If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning. The following operations are related to PutBucketVersioning: CreateBucket DeleteBucket GetBucketVersioning + /// This operation is not supported for directory buckets. When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. While this change is propagating, you might encounter intermittent HTTP 404 NoSuchKey errors for requests to objects created or updated after enabling versioning. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT or DELETE) on objects in the bucket. Sets the versioning state of an existing bucket. You can set the versioning state with one of the following values: Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID. Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null. If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value. In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket. If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. 
The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning. The following operations are related to PutBucketVersioning: CreateBucket DeleteBucket GetBucketVersioning /// /// Parameters: /// - bucket: The bucket name. /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - /// - contentMD5: >The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// - contentMD5: >The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - mfa: The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. /// - versioningConfiguration: Container for setting the versioning state. @@ -3987,7 +4003,7 @@ public struct S3: AWSService { /// Parameters: /// - bucket: The bucket name. /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// - contentMD5: The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. 
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// - contentMD5: The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - mfa: The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. /// - versioningConfiguration: Container for setting the versioning state. @@ -3987,7 +4003,7 @@ /// Parameters: /// - bucket: The bucket name. /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. - /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - /// - checksumSHA1: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - /// - checksumSHA256: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC-32 CRC-32C CRC-64NVME SHA-1 SHA-256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 fails the request with a BadDigest error. The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. 
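The rewritten checksumAlgorithm doc above gives PutObject the same two integrity paths: name an algorithm and the SDK computes the matching x-amz-checksum-* value, or supply one of the checksum parameters that follow (now including CRC-64NVME) yourself. A sketch of the SDK-computed path; the .crc64nvme case name is an assumption about the extended ChecksumAlgorithm enum, and the bucket and key are placeholders:

import NIOCore

_ = try await s3.putObject(
    body: .init(buffer: ByteBuffer(string: "hello, checksums")),
    bucket: "my-bucket",            // placeholder
    checksumAlgorithm: .crc64nvme,  // assumed case for the new CRC64NVME algorithm
    key: "sdk-computed-checksum"
)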
+ /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC64NVME: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME checksum of the object. The CRC-64NVME checksum is always a full object checksum. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumSHA1: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumSHA256: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - contentDisposition: Specifies presentational information for the object. For more information, see https://www.rfc-editor.org/rfc/rfc6266#section-4. /// - contentEncoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding. /// - contentLanguage: The language the content is in. /// - contentLength: Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication. The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. This functionality is not supported for directory buckets. + /// - contentMD5: The Base64 encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication. 
The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. This functionality is not supported for directory buckets. /// - contentType: A standard MIME type describing the format of the contents. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - expires: The date and time at which the object is no longer cacheable. For more information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3. @@ -4062,7 +4079,7 @@ public struct S3: AWSService { /// - sseCustomerAlgorithm: Specifies the algorithm to use when encrypting the object (for example, AES256). This functionality is not supported for directory buckets. /// - sseCustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header. This functionality is not supported for directory buckets. /// - sseCustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. This functionality is not supported for directory buckets. - /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. + /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). 
An additional encryption context value is not supported. /// - ssekmsKeyId: Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. /// - storageClass: By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide. For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. /// - tagging: The tag-set for the object. The tag-set must be encoded as URL Query parameters. (For example, "Key1=Value1") This functionality is not supported for directory buckets. @@ -4079,6 +4096,7 @@ public struct S3: AWSService { checksumAlgorithm: ChecksumAlgorithm? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, + checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, contentDisposition: String? = nil, @@ -4122,6 +4140,7 @@ public struct S3: AWSService { checksumAlgorithm: checksumAlgorithm, checksumCRC32: checksumCRC32, checksumCRC32C: checksumCRC32C, + checksumCRC64NVME: checksumCRC64NVME, checksumSHA1: checksumSHA1, checksumSHA256: checksumSHA256, contentDisposition: contentDisposition, @@ -4178,7 +4197,7 @@ public struct S3: AWSService { /// - acl: The canned ACL to apply to the object. For more information, see Canned ACL. /// - bucket: The bucket name that contains the object to which you want to attach the ACL. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. 
For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.> For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// - contentMD5: The Base64 encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - grantFullControl: Allows grantee the read, write, read ACP, and write ACP permissions on the bucket. This functionality is not supported for Amazon S3 on Outposts. /// - grantRead: Allows grantee to list the objects in the bucket. This functionality is not supported for Amazon S3 on Outposts. @@ -4599,12 +4618,13 @@ public struct S3: AWSService { /// - body: Object data. /// - bucket: The name of the bucket to which the multipart upload was initiated. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must be the same for all parts and it must match the checksum value supplied in the CreateMultipartUpload request. - /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC64NVME: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.
This header specifies the Base64 encoded, 64-bit CRC-64NVME checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumSHA1: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumSHA256: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - contentLength: Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameter is required if object lock parameters are specified. This functionality is not supported for directory buckets. + /// - contentMD5: The Base64 encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameter is required if object lock parameters are specified. This functionality is not supported for directory buckets. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - key: Object key for which the multipart upload was initiated. /// - partNumber: Part number of part being uploaded. This is a positive integer between 1 and 10,000. @@ -4621,6 +4641,7 @@ public struct S3: AWSService { checksumAlgorithm: ChecksumAlgorithm? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, + checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, contentLength: Int64? = nil, @@ -4641,6 +4662,7 @@ public struct S3: AWSService { checksumAlgorithm: checksumAlgorithm, checksumCRC32: checksumCRC32, checksumCRC32C: checksumCRC32C, + checksumCRC64NVME: checksumCRC64NVME, checksumSHA1: checksumSHA1, checksumSHA256: checksumSHA256, contentLength: contentLength, @@ -4763,10 +4785,11 @@ public struct S3: AWSService { /// - body: The object data. /// - bucketKeyEnabled: Indicates whether the object stored in Amazon S3 uses an S3 bucket key for server-side encryption with Amazon Web Services KMS (SSE-KMS). /// - cacheControl: Specifies caching behavior along the request/reply chain. - /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. 
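The UploadPart changes above follow the same pattern; a sketch, assuming the part response echoes the checksum back the way the request shapes here suggest (bucket and key are placeholders):

import Foundation
import SotoS3

// Upload one part with the new CRC-64NVME header and carry the returned
// ETag/checksum into the part list that CompleteMultipartUpload needs.
func uploadPart(s3: S3, data: Data, uploadId: String, partNumber: Int, crc64: String) async throws -> S3.CompletedPart {
    let response = try await s3.uploadPart(
        body: .init(bytes: data),
        bucket: "example-bucket",
        checksumCRC64NVME: crc64,
        key: "example-key",
        partNumber: partNumber,
        uploadId: uploadId
    )
    return S3.CompletedPart(
        checksumCRC64NVME: response.checksumCRC64NVME,  // assumed present on the output shape
        eTag: response.eTag,
        partNumber: partNumber
    )
}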
- /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. - /// - checksumSHA1: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. - /// - checksumSHA256: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 256-bit SHA-256 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 32-bit CRC-32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 32-bit CRC-32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// - checksumCRC64NVME: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. 
This header specifies the Base64 encoded, 64-bit CRC-64NVME checksum of the object returned by the Object Lambda function. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumSHA1: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// - checksumSHA256: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 256-bit SHA-256 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. /// - contentDisposition: Specifies presentational information for the object. /// - contentEncoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. /// - contentLanguage: The language the content is in. @@ -4808,6 +4831,7 @@ public struct S3: AWSService { cacheControl: String? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, + checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, contentDisposition: String? = nil, @@ -4851,6 +4875,7 @@ public struct S3: AWSService { cacheControl: cacheControl, checksumCRC32: checksumCRC32, checksumCRC32C: checksumCRC32C, + checksumCRC64NVME: checksumCRC64NVME, checksumSHA1: checksumSHA1, checksumSHA256: checksumSHA256, contentDisposition: contentDisposition, diff --git a/Sources/Soto/Services/S3/S3_shapes.swift b/Sources/Soto/Services/S3/S3_shapes.swift index 00641662d3..fcb93142bf 100644 --- a/Sources/Soto/Services/S3/S3_shapes.swift +++ b/Sources/Soto/Services/S3/S3_shapes.swift @@ -121,6 +121,12 @@ extension S3 { public var description: String { return self.rawValue } } + public enum ChecksumType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case composite = "COMPOSITE" + case fullObject = "FULL_OBJECT" + public var description: String { return self.rawValue } + } + public enum CompressionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case bzip2 = "BZIP2" case gzip = "GZIP" @@ -1103,28 +1109,36 @@ extension S3 { } public struct Checksum: AWSDecodableShape { - /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part.
For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. This checksum is present if the object was uploaded with the CRC-64NVME checksum algorithm, or if the object was uploaded without a checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be present if the checksum was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part.
For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? + /// The checksum type that is used to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? @inlinable - public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil) { + public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, checksumType: ChecksumType? = nil) { self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 + self.checksumType = checksumType } private enum CodingKeys: String, CodingKey { case checksumCRC32 = "ChecksumCRC32" case checksumCRC32C = "ChecksumCRC32C" + case checksumCRC64NVME = "ChecksumCRC64NVME" case checksumSHA1 = "ChecksumSHA1" case checksumSHA256 = "ChecksumSHA256" + case checksumType = "ChecksumType" } } @@ -1147,14 +1161,18 @@ extension S3 { public let bucket: String? /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). public let bucketKeyEnabled: Bool? - /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part.
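Since the Checksum shape now carries both the value and the new ChecksumType, a small illustrative helper shows how the two relate (names here are just for the example):

import SotoS3

// FULL_OBJECT means the value covers the whole object (CRC-64NVME always
// does); COMPOSITE means it was combined from per-part checksums.
func describe(_ checksum: S3.Checksum) -> String {
    switch checksum.checksumType {
    case .fullObject?:
        return "full-object checksum: \(checksum.checksumCRC64NVME ?? checksum.checksumCRC32 ?? "n/a")"
    case .composite?:
        return "composite checksum of parts: \(checksum.checksumSHA256 ?? checksum.checksumCRC32 ?? "n/a")"
    case nil:
        return "no checksum recorded"
    }
}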
+ /// The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME checksum of the object. The CRC-64NVME checksum is always a full object checksum. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be present if the checksum was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? + /// The checksum type, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header as a data integrity check to verify that the checksum type that is received is the same checksum type that was specified during the CreateMultipartUpload request. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? /// Entity tag that identifies the newly created object's data.
Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits. For more information about how the entity tag is calculated, see Checking object integrity in the Amazon S3 User Guide. public let eTag: String? /// If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. This functionality is not supported for directory buckets. @@ -1172,13 +1190,15 @@ extension S3 { public let versionId: String? @inlinable - public init(bucket: String? = nil, bucketKeyEnabled: Bool? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, eTag: String? = nil, expiration: String? = nil, key: String? = nil, location: String? = nil, requestCharged: RequestCharged? = nil, serverSideEncryption: ServerSideEncryption? = nil, ssekmsKeyId: String? = nil, versionId: String? = nil) { + public init(bucket: String? = nil, bucketKeyEnabled: Bool? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, checksumType: ChecksumType? = nil, eTag: String? = nil, expiration: String? = nil, key: String? = nil, location: String? = nil, requestCharged: RequestCharged? = nil, serverSideEncryption: ServerSideEncryption? = nil, ssekmsKeyId: String? = nil, versionId: String? = nil) { self.bucket = bucket self.bucketKeyEnabled = bucketKeyEnabled self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 + self.checksumType = checksumType self.eTag = eTag self.expiration = expiration self.key = key @@ -1196,8 +1216,10 @@ extension S3 { self.bucketKeyEnabled = try response.decodeHeaderIfPresent(Bool.self, key: "x-amz-server-side-encryption-bucket-key-enabled") self.checksumCRC32 = try container.decodeIfPresent(String.self, forKey: .checksumCRC32) self.checksumCRC32C = try container.decodeIfPresent(String.self, forKey: .checksumCRC32C) + self.checksumCRC64NVME = try container.decodeIfPresent(String.self, forKey: .checksumCRC64NVME) self.checksumSHA1 = try container.decodeIfPresent(String.self, forKey: .checksumSHA1) self.checksumSHA256 = try container.decodeIfPresent(String.self, forKey: .checksumSHA256) + self.checksumType = try container.decodeIfPresent(ChecksumType.self, forKey: .checksumType) self.eTag = try container.decodeIfPresent(String.self, forKey: .eTag) self.expiration = try response.decodeHeaderIfPresent(String.self, key: "x-amz-expiration") self.key = try container.decodeIfPresent(String.self, forKey: .key) @@ -1212,8 +1234,10 @@ extension S3 { case bucket = "Bucket" case checksumCRC32 = "ChecksumCRC32" case checksumCRC32C = "ChecksumCRC32C" + case checksumCRC64NVME = "ChecksumCRC64NVME" case checksumSHA1 = "ChecksumSHA1" case checksumSHA256 = "ChecksumSHA256" + case checksumType = "ChecksumType" case eTag = "ETag" case key = "Key" case location = "Location" @@ -1224,14 +1248,18 @@ extension S3 { public static let _xmlRootNodeName: String? = "CompleteMultipartUpload" /// Name of the bucket to which the multipart upload was initiated. 
Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. public let bucket: String - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME checksum of the object. 
The CRC-64NVME checksum is always a full object checksum. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? + /// This header specifies the checksum type of the object, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header as a data integrity check to verify that the checksum type that is received is the same checksum that was specified. If the checksum type doesn’t match the checksum type that was specified for the object during the CreateMultipartUpload request, it’ll result in a BadDigest error. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). public let expectedBucketOwner: String? /// Uploads the object only if the ETag (entity tag) value provided during the WRITE operation matches the ETag of the object in S3. If the ETag values do not match, the operation returns a 412 Precondition Failed error. If a conflicting operation occurs during the upload S3 returns a 409 ConditionalRequestConflict response. On a 409 failure you should fetch the object's ETag, re-initiate the multipart upload with CreateMultipartUpload, and re-upload each part. Expects the ETag value as a string. For more information about conditional requests, see RFC 7232, or Conditional requests in the Amazon S3 User Guide. @@ -1240,6 +1268,8 @@ extension S3 { public let ifNoneMatch: String? /// Object key for which the multipart upload was initiated. public let key: String + /// The expected total object size of the multipart upload request. If there’s a mismatch between the specified object size value and the actual object size value, it results in an HTTP 400 InvalidRequest error. + public let mpuObjectSize: String? /// The container for the multipart upload request information. public let multipartUpload: CompletedMultipartUpload? public let requestPayer: RequestPayer? @@ -1253,16 +1283,19 @@ extension S3 { public let uploadId: String @inlinable - public init(bucket: String, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, expectedBucketOwner: String? = nil, ifMatch: String? = nil, ifNoneMatch: String? = nil, key: String, multipartUpload: CompletedMultipartUpload? 
= nil, requestPayer: RequestPayer? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKey: String? = nil, sseCustomerKeyMD5: String? = nil, uploadId: String) { + public init(bucket: String, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, checksumType: ChecksumType? = nil, expectedBucketOwner: String? = nil, ifMatch: String? = nil, ifNoneMatch: String? = nil, key: String, mpuObjectSize: String? = nil, multipartUpload: CompletedMultipartUpload? = nil, requestPayer: RequestPayer? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKey: String? = nil, sseCustomerKeyMD5: String? = nil, uploadId: String) { self.bucket = bucket self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 + self.checksumType = checksumType self.expectedBucketOwner = expectedBucketOwner self.ifMatch = ifMatch self.ifNoneMatch = ifNoneMatch self.key = key + self.mpuObjectSize = mpuObjectSize self.multipartUpload = multipartUpload self.requestPayer = requestPayer self.sseCustomerAlgorithm = sseCustomerAlgorithm @@ -1277,12 +1310,15 @@ extension S3 { request.encodePath(self.bucket, key: "Bucket") request.encodeHeader(self.checksumCRC32, key: "x-amz-checksum-crc32") request.encodeHeader(self.checksumCRC32C, key: "x-amz-checksum-crc32c") + request.encodeHeader(self.checksumCRC64NVME, key: "x-amz-checksum-crc64nvme") request.encodeHeader(self.checksumSHA1, key: "x-amz-checksum-sha1") request.encodeHeader(self.checksumSHA256, key: "x-amz-checksum-sha256") + request.encodeHeader(self.checksumType, key: "x-amz-checksum-type") request.encodeHeader(self.expectedBucketOwner, key: "x-amz-expected-bucket-owner") request.encodeHeader(self.ifMatch, key: "If-Match") request.encodeHeader(self.ifNoneMatch, key: "If-None-Match") request.encodePath(self.key, key: "Key") + request.encodeHeader(self.mpuObjectSize, key: "x-amz-mp-object-size") try container.encode(self.multipartUpload) request.encodeHeader(self.requestPayer, key: "x-amz-request-payer") request.encodeHeader(self.sseCustomerAlgorithm, key: "x-amz-server-side-encryption-customer-algorithm") @@ -1313,13 +1349,15 @@ extension S3 { } public struct CompletedPart: AWSEncodableShape { - /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32 checksum of the part. This checksum is present if the multipart upload request was created with the CRC-32 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. 
For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32C checksum of the part. This checksum is present if the multipart upload request was created with the CRC-32C checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the part. This checksum is present if the multipart upload request was created with the CRC-64NVME checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// The Base64 encoded, 160-bit SHA-1 checksum of the part. This checksum is present if the multipart upload request was created with the SHA-1 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 256-bit SHA-256 checksum of the part. This checksum is present if the multipart upload request was created with the SHA-256 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? /// Entity tag returned when the part was uploaded. public let eTag: String? @@ -1327,9 +1365,10 @@ extension S3 { public let partNumber: Int? @inlinable - public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, eTag: String? = nil, partNumber: Int? = nil) { + public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, eTag: String? = nil, partNumber: Int? = nil) { self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 self.eTag = eTag @@ -1339,6 +1378,7 @@ extension S3 { private enum CodingKeys: String, CodingKey { case checksumCRC32 = "ChecksumCRC32" case checksumCRC32C = "ChecksumCRC32C" + case checksumCRC64NVME = "ChecksumCRC64NVME" case checksumSHA1 = "ChecksumSHA1" case checksumSHA256 = "ChecksumSHA256" case eTag = "ETag" @@ -1384,7 +1424,7 @@ extension S3 { public let sseCustomerAlgorithm: String?
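A sketch of how the new CompleteMultipartUploadRequest fields fit together, using only the initializer shown above (bucket and key are placeholders; ifNoneMatch: "*" makes the complete succeed only if the key does not already exist):

import SotoS3

// Complete a multipart upload with a full-object checksum type and the
// expected total size (sent as x-amz-mp-object-size).
func complete(s3: S3, parts: [S3.CompletedPart], uploadId: String, totalBytes: Int64) async throws {
    let request = S3.CompleteMultipartUploadRequest(
        bucket: "example-bucket",
        checksumType: .fullObject,
        ifNoneMatch: "*",
        key: "example-key",
        mpuObjectSize: String(totalBytes),
        multipartUpload: S3.CompletedMultipartUpload(parts: parts),
        uploadId: uploadId
    )
    let output = try await s3.completeMultipartUpload(request)
    if let type = output.checksumType {
        print("S3 reported checksum type \(type) for \(output.key ?? "?")")
    }
}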
/// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64 encoded UTF-8 string holding JSON with the encryption context key-value pairs. public let ssekmsEncryptionContext: String? /// If present, indicates the ID of the KMS key that was used for object encryption. public let ssekmsKeyId: String? @@ -1617,25 +1657,31 @@ extension S3 { } public struct CopyObjectResult: AWSDecodableShape { - /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. This checksum is present if the object being copied was uploaded with the CRC-64NVME checksum algorithm, or if the object was uploaded without a checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? + /// The checksum type that is used to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? /// Returns the ETag of the new object.
The ETag reflects only changes to the contents of an object, not its metadata. public let eTag: String? /// Creation date of the object. public let lastModified: Date? @inlinable - public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, eTag: String? = nil, lastModified: Date? = nil) { + public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, checksumType: ChecksumType? = nil, eTag: String? = nil, lastModified: Date? = nil) { self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 + self.checksumType = checksumType self.eTag = eTag self.lastModified = lastModified } @@ -1643,21 +1689,25 @@ extension S3 { private enum CodingKeys: String, CodingKey { case checksumCRC32 = "ChecksumCRC32" case checksumCRC32C = "ChecksumCRC32C" + case checksumCRC64NVME = "ChecksumCRC64NVME" case checksumSHA1 = "ChecksumSHA1" case checksumSHA256 = "ChecksumSHA256" + case checksumType = "ChecksumType" case eTag = "ETag" case lastModified = "LastModified" } } public struct CopyPartResult: AWSDecodableShape { - /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32 checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32C checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the part. 
This checksum is present if the multipart upload request was created with the CRC-64NVME checksum algorithm, or if the object was uploaded without a checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 160-bit SHA-1 checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 256-bit SHA-256 checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? /// Entity tag of the object. public let eTag: String? @@ -1665,9 +1715,10 @@ extension S3 { public let lastModified: Date? @inlinable - public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, eTag: String? = nil, lastModified: Date? = nil) { + public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, eTag: String? = nil, lastModified: Date? = nil) { self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 self.eTag = eTag @@ -1677,6 +1728,7 @@ extension S3 { private enum CodingKeys: String, CodingKey { case checksumCRC32 = "ChecksumCRC32" case checksumCRC32C = "ChecksumCRC32C" + case checksumCRC64NVME = "ChecksumCRC64NVME" case checksumSHA1 = "ChecksumSHA1" case checksumSHA256 = "ChecksumSHA256" case eTag = "ETag" @@ -1825,6 +1877,8 @@ extension S3 { public let bucketKeyEnabled: Bool? /// The algorithm that was used to create a checksum of the object. public let checksumAlgorithm: ChecksumAlgorithm? + /// Indicates the checksum type that you want Amazon S3 to use to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? /// Object key for which the multipart upload was initiated. public let key: String? public let requestCharged: RequestCharged? @@ -1834,7 +1888,7 @@ extension S3 { public let sseCustomerAlgorithm: String? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption.
The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. public let ssekmsEncryptionContext: String? /// If present, indicates the ID of the KMS key that was used for object encryption. public let ssekmsKeyId: String? @@ -1842,12 +1896,13 @@ extension S3 { public let uploadId: String? @inlinable - public init(abortDate: Date? = nil, abortRuleId: String? = nil, bucket: String? = nil, bucketKeyEnabled: Bool? = nil, checksumAlgorithm: ChecksumAlgorithm? = nil, key: String? = nil, requestCharged: RequestCharged? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsEncryptionContext: String? = nil, ssekmsKeyId: String? = nil, uploadId: String? = nil) { + public init(abortDate: Date? = nil, abortRuleId: String? = nil, bucket: String? = nil, bucketKeyEnabled: Bool? = nil, checksumAlgorithm: ChecksumAlgorithm? = nil, checksumType: ChecksumType? = nil, key: String? = nil, requestCharged: RequestCharged? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsEncryptionContext: String? = nil, ssekmsKeyId: String? = nil, uploadId: String? = nil) { self.abortDate = abortDate self.abortRuleId = abortRuleId self.bucket = bucket self.bucketKeyEnabled = bucketKeyEnabled self.checksumAlgorithm = checksumAlgorithm + self.checksumType = checksumType self.key = key self.requestCharged = requestCharged self.serverSideEncryption = serverSideEncryption @@ -1866,6 +1921,7 @@ extension S3 { self.bucket = try container.decodeIfPresent(String.self, forKey: .bucket) self.bucketKeyEnabled = try response.decodeHeaderIfPresent(Bool.self, key: "x-amz-server-side-encryption-bucket-key-enabled") self.checksumAlgorithm = try response.decodeHeaderIfPresent(ChecksumAlgorithm.self, key: "x-amz-checksum-algorithm") + self.checksumType = try response.decodeHeaderIfPresent(ChecksumType.self, key: "x-amz-checksum-type") self.key = try container.decodeIfPresent(String.self, forKey: .key) self.requestCharged = try response.decodeHeaderIfPresent(RequestCharged.self, key: "x-amz-request-charged") self.serverSideEncryption = try response.decodeHeaderIfPresent(ServerSideEncryption.self, key: "x-amz-server-side-encryption") @@ -1895,6 +1951,8 @@ extension S3 { public let cacheControl: String? /// Indicates the algorithm that you want Amazon S3 to use to create the checksum for the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumAlgorithm: ChecksumAlgorithm? + /// Indicates the checksum type that you want Amazon S3 to use to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? /// Specifies presentational information for the object. public let contentDisposition: String? /// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For directory buckets, only the aws-chunked value is supported in this header field. 
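The new checksumType rides alongside the existing checksumAlgorithm on CreateMultipartUpload and is echoed back as the x-amz-checksum-type response header. A minimal Soto sketch of that round trip — the bucket and key are placeholders, it assumes Soto 7's default AWSClient() initializer with environment credentials, and it assumes the generated ChecksumType enum exposes a .fullObject case (the S3 model allows COMPOSITE and FULL_OBJECT):

```swift
import SotoS3

let client = AWSClient()
let s3 = S3(client: client, region: .useast1)

// Hypothetical bucket/key. `.fullObject` asks S3 to publish one
// full-object checksum instead of a composite of part checksums.
let upload = try await s3.createMultipartUpload(
    S3.CreateMultipartUploadRequest(
        bucket: "my-bucket",
        checksumAlgorithm: .crc32,  // sent as x-amz-checksum-algorithm
        checksumType: .fullObject,  // sent as x-amz-checksum-type
        key: "backups/archive.bin"
    )
)
// The output decodes x-amz-checksum-type back into `checksumType`.
print(upload.uploadId ?? "-", upload.checksumType?.rawValue ?? "-")

try await client.shutdown()
```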
@@ -1937,7 +1995,7 @@ extension S3 { public let sseCustomerKey: String? /// Specifies the 128-bit MD5 digest of the customer-provided encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. + /// Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. public let ssekmsEncryptionContext: String? /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. /// The Amazon Web Services managed key (aws/s3) isn't supported. @@ -1950,12 +2008,13 @@ extension S3 { public let websiteRedirectLocation: String? @inlinable - public init(acl: ObjectCannedACL? = nil, bucket: String, bucketKeyEnabled: Bool? = nil, cacheControl: String? = nil, checksumAlgorithm: ChecksumAlgorithm? = nil, contentDisposition: String? = nil, contentEncoding: String? = nil, contentLanguage: String? = nil, contentType: String? = nil, expectedBucketOwner: String? = nil, expires: Date? = nil, grantFullControl: String? = nil, grantRead: String? = nil, grantReadACP: String? = nil, grantWriteACP: String? = nil, key: String, metadata: [String: String]? = nil, objectLockLegalHoldStatus: ObjectLockLegalHoldStatus? 
= nil, objectLockMode: ObjectLockMode? = nil, objectLockRetainUntilDate: Date? = nil, requestPayer: RequestPayer? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKey: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsEncryptionContext: String? = nil, ssekmsKeyId: String? = nil, storageClass: StorageClass? = nil, tagging: String? = nil, websiteRedirectLocation: String? = nil) { + public init(acl: ObjectCannedACL? = nil, bucket: String, bucketKeyEnabled: Bool? = nil, cacheControl: String? = nil, checksumAlgorithm: ChecksumAlgorithm? = nil, checksumType: ChecksumType? = nil, contentDisposition: String? = nil, contentEncoding: String? = nil, contentLanguage: String? = nil, contentType: String? = nil, expectedBucketOwner: String? = nil, expires: Date? = nil, grantFullControl: String? = nil, grantRead: String? = nil, grantReadACP: String? = nil, grantWriteACP: String? = nil, key: String, metadata: [String: String]? = nil, objectLockLegalHoldStatus: ObjectLockLegalHoldStatus? = nil, objectLockMode: ObjectLockMode? = nil, objectLockRetainUntilDate: Date? = nil, requestPayer: RequestPayer? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKey: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsEncryptionContext: String? = nil, ssekmsKeyId: String? = nil, storageClass: StorageClass? = nil, tagging: String? = nil, websiteRedirectLocation: String? = nil) { self.acl = acl self.bucket = bucket self.bucketKeyEnabled = bucketKeyEnabled self.cacheControl = cacheControl self.checksumAlgorithm = checksumAlgorithm + self.checksumType = checksumType self.contentDisposition = contentDisposition self.contentEncoding = contentEncoding self.contentLanguage = contentLanguage @@ -1991,6 +2050,7 @@ extension S3 { request.encodeHeader(self.bucketKeyEnabled, key: "x-amz-server-side-encryption-bucket-key-enabled") request.encodeHeader(self.cacheControl, key: "Cache-Control") request.encodeHeader(self.checksumAlgorithm, key: "x-amz-checksum-algorithm") + request.encodeHeader(self.checksumType, key: "x-amz-checksum-type") request.encodeHeader(self.contentDisposition, key: "Content-Disposition") request.encodeHeader(self.contentEncoding, key: "Content-Encoding") request.encodeHeader(self.contentLanguage, key: "Content-Language") @@ -2032,7 +2092,7 @@ extension S3 { public let credentials: SessionCredentials /// The server-side encryption algorithm used when you store objects in the directory bucket. public let serverSideEncryption: ServerSideEncryption? - /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. public let ssekmsEncryptionContext: String? 
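These ssekmsEncryptionContext fields all describe the same wire format: UTF-8 JSON key-value pairs, Base64 encoded. A small Foundation-only sketch of producing such a value (the context keys shown are illustrative, not anything S3 requires):

```swift
import Foundation

// Illustrative key-value pairs; any valid JSON object works.
let encryptionContext = ["department": "finance", "project": "alpha"]

// UTF-8 JSON, then Base64: the format the header documentation describes.
let json = try JSONSerialization.data(withJSONObject: encryptionContext)
let headerValue = json.base64EncodedString()

// Pass `headerValue` as `ssekmsEncryptionContext`
// (the x-amz-server-side-encryption-context header).
print(headerValue)
```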
/// If you specify x-amz-server-side-encryption with aws:kms, this header indicates the ID of the KMS symmetric encryption customer managed key that was used for object encryption. public let ssekmsKeyId: String? @@ -2071,7 +2131,7 @@ extension S3 { public let serverSideEncryption: ServerSideEncryption? /// Specifies the mode of the session that will be created, either ReadWrite or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session is capable of executing all the Zonal endpoint API operations on a directory bucket. A ReadOnly session is constrained to execute the following Zonal endpoint API operations: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, and ListMultipartUploads. public let sessionMode: SessionMode? - /// Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. + /// Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. public let ssekmsEncryptionContext: String? /// If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. /// The Amazon Web Services managed key (aws/s3) isn't supported. @@ -2682,7 +2742,7 @@ extension S3 { public let bucket: String /// Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention permission. This functionality is not supported for directory buckets.
public let bypassGovernanceRetention: Bool? - /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. + /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC-32 CRC-32C CRC-64NVME SHA-1 SHA-256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 fails the request with a BadDigest error. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. public let checksumAlgorithm: ChecksumAlgorithm? /// Container for the request. public let delete: Delete @@ -3244,7 +3304,7 @@ extension S3 { public struct GetBucketLifecycleConfigurationOutput: AWSDecodableShape { /// Container for a lifecycle rule. public let rules: [LifecycleRule]? - /// Indicates which default minimum object size behavior is applied to the lifecycle configuration. This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations. all_storage_classes_128K - Objects smaller than 128 KB will not transition to any storage class by default. varies_by_storage_class - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB. To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body of your transition rule. Custom filters always take precedence over the default transition behavior. + /// Indicates which default minimum object size behavior is applied to the lifecycle configuration. This parameter applies to general purpose buckets only. It isn't supported for directory bucket lifecycle configurations. all_storage_classes_128K - Objects smaller than 128 KB will not transition to any storage class by default. varies_by_storage_class - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. 
By default, all other storage classes will prevent transitions smaller than 128 KB. To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body of your transition rule. Custom filters always take precedence over the default transition behavior. public let transitionDefaultMinimumObjectSize: TransitionDefaultMinimumObjectSize? @inlinable @@ -4134,14 +4194,18 @@ extension S3 { public let bucketKeyEnabled: Bool? /// Specifies caching behavior along the request/reply chain. public let cacheControl: String? - /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be present if the checksum was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? + /// The checksum type, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header response to verify that the checksum type that is received is the same checksum type that was specified in the CreateMultipartUpload request. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? /// Specifies presentational information for the object. public let contentDisposition: String? /// Indicates what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. @@ -4202,15 +4266,17 @@ extension S3 { public let websiteRedirectLocation: String? @inlinable - public init(acceptRanges: String? = nil, body: AWSHTTPBody, bucketKeyEnabled: Bool? = nil, cacheControl: String?
= nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, contentDisposition: String? = nil, contentEncoding: String? = nil, contentLanguage: String? = nil, contentLength: Int64? = nil, contentRange: String? = nil, contentType: String? = nil, deleteMarker: Bool? = nil, eTag: String? = nil, expiration: String? = nil, expires: Date? = nil, lastModified: Date? = nil, metadata: [String: String]? = nil, missingMeta: Int? = nil, objectLockLegalHoldStatus: ObjectLockLegalHoldStatus? = nil, objectLockMode: ObjectLockMode? = nil, objectLockRetainUntilDate: Date? = nil, partsCount: Int? = nil, replicationStatus: ReplicationStatus? = nil, requestCharged: RequestCharged? = nil, restore: String? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsKeyId: String? = nil, storageClass: StorageClass? = nil, tagCount: Int? = nil, versionId: String? = nil, websiteRedirectLocation: String? = nil) { + public init(acceptRanges: String? = nil, body: AWSHTTPBody, bucketKeyEnabled: Bool? = nil, cacheControl: String? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, checksumType: ChecksumType? = nil, contentDisposition: String? = nil, contentEncoding: String? = nil, contentLanguage: String? = nil, contentLength: Int64? = nil, contentRange: String? = nil, contentType: String? = nil, deleteMarker: Bool? = nil, eTag: String? = nil, expiration: String? = nil, expires: Date? = nil, lastModified: Date? = nil, metadata: [String: String]? = nil, missingMeta: Int? = nil, objectLockLegalHoldStatus: ObjectLockLegalHoldStatus? = nil, objectLockMode: ObjectLockMode? = nil, objectLockRetainUntilDate: Date? = nil, partsCount: Int? = nil, replicationStatus: ReplicationStatus? = nil, requestCharged: RequestCharged? = nil, restore: String? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsKeyId: String? = nil, storageClass: StorageClass? = nil, tagCount: Int? = nil, versionId: String? = nil, websiteRedirectLocation: String? 
= nil) { self.acceptRanges = acceptRanges self.body = body self.bucketKeyEnabled = bucketKeyEnabled self.cacheControl = cacheControl self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 + self.checksumType = checksumType self.contentDisposition = contentDisposition self.contentEncoding = contentEncoding self.contentLanguage = contentLanguage @@ -4250,8 +4316,10 @@ extension S3 { self.cacheControl = try response.decodeHeaderIfPresent(String.self, key: "Cache-Control") self.checksumCRC32 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc32") self.checksumCRC32C = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc32c") + self.checksumCRC64NVME = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc64nvme") self.checksumSHA1 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-sha1") self.checksumSHA256 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-sha256") + self.checksumType = try response.decodeHeaderIfPresent(ChecksumType.self, key: "x-amz-checksum-type") self.contentDisposition = try response.decodeHeaderIfPresent(String.self, key: "Content-Disposition") self.contentEncoding = try response.decodeHeaderIfPresent(String.self, key: "Content-Encoding") self.contentLanguage = try response.decodeHeaderIfPresent(String.self, key: "Content-Language") @@ -4726,14 +4794,18 @@ extension S3 { public let bucketKeyEnabled: Bool? /// Specifies caching behavior along the request/reply chain. public let cacheControl: String? - /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object.
Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be present if the checksum was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? + /// The checksum type, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header response to verify that the checksum type that is received is the same checksum type that was specified in the CreateMultipartUpload request. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? /// Specifies presentational information for the object. public let contentDisposition: String? /// Indicates what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. @@ -4790,15 +4862,17 @@ extension S3 { public let websiteRedirectLocation: String? @inlinable - public init(acceptRanges: String? = nil, archiveStatus: ArchiveStatus? = nil, bucketKeyEnabled: Bool? = nil, cacheControl: String? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String?
= nil, checksumSHA256: String? = nil, contentDisposition: String? = nil, contentEncoding: String? = nil, contentLanguage: String? = nil, contentLength: Int64? = nil, contentType: String? = nil, deleteMarker: Bool? = nil, eTag: String? = nil, expiration: String? = nil, expires: Date? = nil, lastModified: Date? = nil, metadata: [String: String]? = nil, missingMeta: Int? = nil, objectLockLegalHoldStatus: ObjectLockLegalHoldStatus? = nil, objectLockMode: ObjectLockMode? = nil, objectLockRetainUntilDate: Date? = nil, partsCount: Int? = nil, replicationStatus: ReplicationStatus? = nil, requestCharged: RequestCharged? = nil, restore: String? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsKeyId: String? = nil, storageClass: StorageClass? = nil, versionId: String? = nil, websiteRedirectLocation: String? = nil) { + public init(acceptRanges: String? = nil, archiveStatus: ArchiveStatus? = nil, bucketKeyEnabled: Bool? = nil, cacheControl: String? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, checksumType: ChecksumType? = nil, contentDisposition: String? = nil, contentEncoding: String? = nil, contentLanguage: String? = nil, contentLength: Int64? = nil, contentType: String? = nil, deleteMarker: Bool? = nil, eTag: String? = nil, expiration: String? = nil, expires: Date? = nil, lastModified: Date? = nil, metadata: [String: String]? = nil, missingMeta: Int? = nil, objectLockLegalHoldStatus: ObjectLockLegalHoldStatus? = nil, objectLockMode: ObjectLockMode? = nil, objectLockRetainUntilDate: Date? = nil, partsCount: Int? = nil, replicationStatus: ReplicationStatus? = nil, requestCharged: RequestCharged? = nil, restore: String? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsKeyId: String? = nil, storageClass: StorageClass? = nil, versionId: String? = nil, websiteRedirectLocation: String? 
= nil) { self.acceptRanges = acceptRanges self.archiveStatus = archiveStatus self.bucketKeyEnabled = bucketKeyEnabled self.cacheControl = cacheControl self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 + self.checksumType = checksumType self.contentDisposition = contentDisposition self.contentEncoding = contentEncoding self.contentLanguage = contentLanguage @@ -4835,8 +4909,10 @@ extension S3 { self.cacheControl = try response.decodeHeaderIfPresent(String.self, key: "Cache-Control") self.checksumCRC32 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc32") self.checksumCRC32C = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc32c") + self.checksumCRC64NVME = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc64nvme") self.checksumSHA1 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-sha1") self.checksumSHA256 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-sha256") + self.checksumType = try response.decodeHeaderIfPresent(ChecksumType.self, key: "x-amz-checksum-type") self.contentDisposition = try response.decodeHeaderIfPresent(String.self, key: "Content-Disposition") self.contentEncoding = try response.decodeHeaderIfPresent(String.self, key: "Content-Encoding") self.contentLanguage = try response.decodeHeaderIfPresent(String.self, key: "Content-Language") @@ -6285,6 +6361,8 @@ extension S3 { public let bucket: String? /// The algorithm that was used to create a checksum of the object. public let checksumAlgorithm: ChecksumAlgorithm? + /// The checksum type, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. You can use this header response to verify that the checksum type that is received is the same checksum type that was specified in the CreateMultipartUpload request. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? /// Container element that identifies who initiated the multipart upload. If the initiator is an Amazon Web Services account, this element provides the same information as the Owner element. If the initiator is an IAM User, this element provides the user ARN and display name. public let initiator: Initiator? /// Indicates whether the returned list of parts is truncated. A true value indicates that the list was truncated. A list can be truncated if the number of parts exceeds the limit returned in the MaxParts element. @@ -6308,11 +6386,12 @@ extension S3 { public let uploadId: String? @inlinable - public init(abortDate: Date? = nil, abortRuleId: String? = nil, bucket: String? = nil, checksumAlgorithm: ChecksumAlgorithm? = nil, initiator: Initiator? = nil, isTruncated: Bool? = nil, key: String? = nil, maxParts: Int? = nil, nextPartNumberMarker: String? = nil, owner: Owner? = nil, partNumberMarker: String? = nil, parts: [Part]? = nil, requestCharged: RequestCharged? = nil, storageClass: StorageClass? = nil, uploadId: String? = nil) { + public init(abortDate: Date? = nil, abortRuleId: String? = nil, bucket: String? = nil, checksumAlgorithm: ChecksumAlgorithm? = nil, checksumType: ChecksumType? = nil, initiator: Initiator? = nil, isTruncated: Bool? = nil, key: String? = nil, maxParts: Int? = nil, nextPartNumberMarker: String? = nil, owner: Owner? = nil, partNumberMarker: String? = nil, parts: [Part]?
= nil, requestCharged: RequestCharged? = nil, storageClass: StorageClass? = nil, uploadId: String? = nil) { self.abortDate = abortDate self.abortRuleId = abortRuleId self.bucket = bucket self.checksumAlgorithm = checksumAlgorithm + self.checksumType = checksumType self.initiator = initiator self.isTruncated = isTruncated self.key = key @@ -6333,6 +6412,7 @@ extension S3 { self.abortRuleId = try response.decodeHeaderIfPresent(String.self, key: "x-amz-abort-rule-id") self.bucket = try container.decodeIfPresent(String.self, forKey: .bucket) self.checksumAlgorithm = try container.decodeIfPresent(ChecksumAlgorithm.self, forKey: .checksumAlgorithm) + self.checksumType = try container.decodeIfPresent(ChecksumType.self, forKey: .checksumType) self.initiator = try container.decodeIfPresent(Initiator.self, forKey: .initiator) self.isTruncated = try container.decodeIfPresent(Bool.self, forKey: .isTruncated) self.key = try container.decodeIfPresent(String.self, forKey: .key) @@ -6349,6 +6429,7 @@ extension S3 { private enum CodingKeys: String, CodingKey { case bucket = "Bucket" case checksumAlgorithm = "ChecksumAlgorithm" + case checksumType = "ChecksumType" case initiator = "Initiator" case isTruncated = "IsTruncated" case key = "Key" @@ -6583,6 +6664,8 @@ extension S3 { public struct MultipartUpload: AWSDecodableShape { /// The algorithm that was used to create a checksum of the object. public let checksumAlgorithm: ChecksumAlgorithm? + /// The checksum type that is used to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? /// Date and time at which the multipart upload was initiated. public let initiated: Date? /// Identifies who initiated the multipart upload. @@ -6597,8 +6680,9 @@ extension S3 { public let uploadId: String? @inlinable - public init(checksumAlgorithm: ChecksumAlgorithm? = nil, initiated: Date? = nil, initiator: Initiator? = nil, key: String? = nil, owner: Owner? = nil, storageClass: StorageClass? = nil, uploadId: String? = nil) { + public init(checksumAlgorithm: ChecksumAlgorithm? = nil, checksumType: ChecksumType? = nil, initiated: Date? = nil, initiator: Initiator? = nil, key: String? = nil, owner: Owner? = nil, storageClass: StorageClass? = nil, uploadId: String? = nil) { self.checksumAlgorithm = checksumAlgorithm + self.checksumType = checksumType self.initiated = initiated self.initiator = initiator self.key = key @@ -6609,6 +6693,7 @@ extension S3 { private enum CodingKeys: String, CodingKey { case checksumAlgorithm = "ChecksumAlgorithm" + case checksumType = "ChecksumType" case initiated = "Initiated" case initiator = "Initiator" case key = "Key" @@ -6700,6 +6785,8 @@ extension S3 { public struct Object: AWSDecodableShape { /// The algorithm that was used to create a checksum of the object. public let checksumAlgorithm: [ChecksumAlgorithm]? + /// The checksum type that is used to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? /// The entity tag is a hash of the object. The ETag reflects changes only to the contents of an object, not its metadata. The ETag may or may not be an MD5 digest of the object data. 
Whether or not it is depends on how the object was created and how it is encrypted as described below: Objects created by the PUT Object, POST Object, or Copy operation, or through the Amazon Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that are an MD5 digest of their object data. Objects created by the PUT Object, POST Object, or Copy operation, or through the Amazon Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object data. If an object is created by either the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest, regardless of the method of encryption. If an object is larger than 16 MB, the Amazon Web Services Management Console will upload or copy that object as a Multipart Upload, and therefore the ETag will not be an MD5 digest. Directory buckets - MD5 is not supported by directory buckets. public let eTag: String? /// The name that you assign to an object. You use the object key to retrieve the object. @@ -6716,8 +6803,9 @@ extension S3 { public let storageClass: ObjectStorageClass? @inlinable - public init(checksumAlgorithm: [ChecksumAlgorithm]? = nil, eTag: String? = nil, key: String? = nil, lastModified: Date? = nil, owner: Owner? = nil, restoreStatus: RestoreStatus? = nil, size: Int64? = nil, storageClass: ObjectStorageClass? = nil) { + public init(checksumAlgorithm: [ChecksumAlgorithm]? = nil, checksumType: ChecksumType? = nil, eTag: String? = nil, key: String? = nil, lastModified: Date? = nil, owner: Owner? = nil, restoreStatus: RestoreStatus? = nil, size: Int64? = nil, storageClass: ObjectStorageClass? = nil) { self.checksumAlgorithm = checksumAlgorithm + self.checksumType = checksumType self.eTag = eTag self.key = key self.lastModified = lastModified @@ -6729,6 +6817,7 @@ extension S3 { private enum CodingKeys: String, CodingKey { case checksumAlgorithm = "ChecksumAlgorithm" + case checksumType = "ChecksumType" case eTag = "ETag" case key = "Key" case lastModified = "LastModified" @@ -6840,13 +6929,15 @@ extension S3 { } public struct ObjectPart: AWSDecodableShape { - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32 checksum of the part. This checksum is present if the multipart upload request was created with the CRC-32 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32C checksum of the part. This checksum is present if the multipart upload request was created with the CRC-32C checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? 
- /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the part. This checksum is present if the multipart upload request was created with the CRC-64NVME checksum algorithm, or if the object was uploaded without a checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// The Base64 encoded, 160-bit SHA-1 checksum of the part. This checksum is present if the multipart upload request was created with the SHA-1 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 256-bit SHA-256 checksum of the part. This checksum is present if the multipart upload request was created with the SHA-256 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? /// The part number identifying the part. This value is a positive integer between 1 and 10,000. public let partNumber: Int? @@ -6854,9 +6945,10 @@ extension S3 { public let size: Int64? @inlinable - public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, partNumber: Int? = nil, size: Int64? = nil) { + public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, partNumber: Int? = nil, size: Int64? = nil) { self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 self.partNumber = partNumber @@ -6866,6 +6958,7 @@ extension S3 { private enum CodingKeys: String, CodingKey { case checksumCRC32 = "ChecksumCRC32" case checksumCRC32C = "ChecksumCRC32C" + case checksumCRC64NVME = "ChecksumCRC64NVME" case checksumSHA1 = "ChecksumSHA1" case checksumSHA256 = "ChecksumSHA256" case partNumber = "PartNumber" @@ -6876,6 +6969,8 @@ extension S3 { public struct ObjectVersion: AWSDecodableShape { /// The algorithm that was used to create a checksum of the object. public let checksumAlgorithm: [ChecksumAlgorithm]? + /// The checksum type that is used to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? 
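Per-part checksums like these surface through GetObjectAttributes, whose ObjectParts element lists one ObjectPart per part. A hedged sketch of reading them with Soto — the bucket and key are placeholders, and the attribute case names assume Soto's usual lowerCamelCase mapping of the Checksum and ObjectParts attribute values:

```swift
import SotoS3

let client = AWSClient()
let s3 = S3(client: client, region: .useast1)

let attrs = try await s3.getObjectAttributes(
    S3.GetObjectAttributesRequest(
        bucket: "my-bucket",          // placeholder
        key: "backups/archive.bin",   // placeholder
        objectAttributes: [.checksum, .objectParts]
    )
)
// A part-level checksum is present only if the multipart upload
// was created with that checksum algorithm.
for part in attrs.objectParts?.parts ?? [] {
    print(part.partNumber ?? 0, part.checksumCRC64NVME ?? "-")
}

try await client.shutdown()
```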
/// The entity tag is an MD5 hash of that version of the object. public let eTag: String? /// Specifies whether the object is (true) or is not (false) the latest version of an object. @@ -6896,8 +6991,9 @@ extension S3 { public let versionId: String? @inlinable - public init(checksumAlgorithm: [ChecksumAlgorithm]? = nil, eTag: String? = nil, isLatest: Bool? = nil, key: String? = nil, lastModified: Date? = nil, owner: Owner? = nil, restoreStatus: RestoreStatus? = nil, size: Int64? = nil, storageClass: ObjectVersionStorageClass? = nil, versionId: String? = nil) { + public init(checksumAlgorithm: [ChecksumAlgorithm]? = nil, checksumType: ChecksumType? = nil, eTag: String? = nil, isLatest: Bool? = nil, key: String? = nil, lastModified: Date? = nil, owner: Owner? = nil, restoreStatus: RestoreStatus? = nil, size: Int64? = nil, storageClass: ObjectVersionStorageClass? = nil, versionId: String? = nil) { self.checksumAlgorithm = checksumAlgorithm + self.checksumType = checksumType self.eTag = eTag self.isLatest = isLatest self.key = key @@ -6911,6 +7007,7 @@ extension S3 { private enum CodingKeys: String, CodingKey { case checksumAlgorithm = "ChecksumAlgorithm" + case checksumType = "ChecksumType" case eTag = "ETag" case isLatest = "IsLatest" case key = "Key" @@ -7009,13 +7106,15 @@ extension S3 { } public struct Part: AWSDecodableShape { - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32 checksum of the part. This checksum is present if the object was uploaded with the CRC-32 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32C checksum of the part. This checksum is present if the object was uploaded with the CRC-32C checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the part. This checksum is present if the multipart upload request was created with the CRC-64NVME checksum algorithm, or if the object was uploaded without a checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). 
For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// The Base64 encoded, 160-bit SHA-1 checksum of the part. This checksum is present if the object was uploaded with the SHA-1 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 256-bit SHA-256 checksum of the part. This checksum is present if the object was uploaded with the SHA-256 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? /// Entity tag returned when the part was uploaded. public let eTag: String? @@ -7027,9 +7126,10 @@ extension S3 { public let size: Int64? @inlinable - public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, eTag: String? = nil, lastModified: Date? = nil, partNumber: Int? = nil, size: Int64? = nil) { + public init(checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, eTag: String? = nil, lastModified: Date? = nil, partNumber: Int? = nil, size: Int64? = nil) { self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 self.eTag = eTag @@ -7041,6 +7141,7 @@ extension S3 { private enum CodingKeys: String, CodingKey { case checksumCRC32 = "ChecksumCRC32" case checksumCRC32C = "ChecksumCRC32C" + case checksumCRC64NVME = "ChecksumCRC64NVME" case checksumSHA1 = "ChecksumSHA1" case checksumSHA256 = "ChecksumSHA256" case eTag = "ETag" @@ -7186,7 +7287,7 @@ extension S3 { public let bucket: String /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. public let checksumAlgorithm: ChecksumAlgorithm? - /// The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// The Base64 encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. public let contentMD5: String? /// The account ID of the expected bucket owner. 
If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). public let expectedBucketOwner: String? @@ -7277,7 +7378,7 @@ extension S3 { public let bucket: String /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. public let checksumAlgorithm: ChecksumAlgorithm? - /// The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// The Base64 encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. public let contentMD5: String? /// Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide. public let corsConfiguration: CORSConfiguration @@ -7313,7 +7414,7 @@ extension S3 { public let bucket: String /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. public let checksumAlgorithm: ChecksumAlgorithm? - /// The base64-encoded 128-bit MD5 digest of the server-side encryption configuration. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. This functionality is not supported for directory buckets. + /// The Base64 encoded 128-bit MD5 digest of the server-side encryption configuration. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. This functionality is not supported for directory buckets. public let contentMD5: String? /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this header is not supported in this API operation. 
If you specify this header, the request fails with the HTTP status code /// 501 Not Implemented. @@ -7599,7 +7700,7 @@ extension S3 { public static let _xmlRootNodeName: String? = "Policy" /// The name of the bucket. Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must also follow the format bucket-base-name--zone-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide public let bucket: String - /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. + /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC-32 CRC-32C CRC-64NVME SHA-1 SHA-256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 fails the request with a BadDigest error. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. public let checksumAlgorithm: ChecksumAlgorithm? /// Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future. This functionality is not supported for directory buckets. public let confirmRemoveSelfBucketAccess: Bool? @@ -7642,7 +7743,7 @@ extension S3 { public let bucket: String /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. 
For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. public let checksumAlgorithm: ChecksumAlgorithm? - /// The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. public let contentMD5: String? /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). public let expectedBucketOwner: String? @@ -7685,7 +7786,7 @@ extension S3 { public let bucket: String /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. public let checksumAlgorithm: ChecksumAlgorithm? - /// The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. public let contentMD5: String? /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). public let expectedBucketOwner: String? @@ -7721,7 +7822,7 @@ extension S3 { public let bucket: String /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. public let checksumAlgorithm: ChecksumAlgorithm? 
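For callers constructing these requests directly, the Content-MD5 value documented in the surrounding hunks is just the Base64 encoding of a raw MD5 digest. A minimal sketch, assuming the swift-crypto package is available alongside Soto (the CLI and the AWS SDKs normally compute this field automatically, so this is illustrative only):

import Crypto
import Foundation

/// Produces the Content-MD5 value described in the hunks above: the Base64
/// encoding of the raw 128-bit MD5 digest of the request body (RFC 1864).
func contentMD5(of body: Data) -> String {
    let digest = Insecure.MD5.hash(data: body)
    return Data(digest).base64EncodedString()
}

The resulting string can be passed as the contentMD5: parameter of the Put* request shapes in this diff; a mismatched digest causes Amazon S3 to reject the request.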
- /// The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. public let contentMD5: String? /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). public let expectedBucketOwner: String? @@ -7761,7 +7862,7 @@ extension S3 { public let bucket: String /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. public let checksumAlgorithm: ChecksumAlgorithm? - /// >The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. public let contentMD5: String? /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). public let expectedBucketOwner: String? @@ -7861,7 +7962,7 @@ extension S3 { public let bucket: String /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. public let checksumAlgorithm: ChecksumAlgorithm? - /// The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.> For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// The Base64 encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. public let contentMD5: String? /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). public let expectedBucketOwner: String? @@ -8054,14 +8155,18 @@ extension S3 { public struct PutObjectOutput: AWSDecodableShape { /// Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). public let bucketKeyEnabled: Bool? - /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object.
Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 64-bit CRC-64NVME checksum of the object. This header is present if the object was uploaded with the CRC-64NVME checksum algorithm, or if it was uploaded without a checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// The Base64 encoded, 160-bit SHA-1 digest of the object. This checksum is only present if the checksum was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 256-bit SHA-256 digest of the object. This checksum is only present if the checksum was uploaded with the object.
When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? + /// This header specifies the checksum type of the object, which determines how part-level checksums are combined to create an object-level checksum for multipart objects. For PutObject uploads, the checksum type is always FULL_OBJECT. You can use this header as a data integrity check to verify that the checksum type that is received is the same checksum that was specified. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumType: ChecksumType? /// Entity tag for the uploaded object. General purpose buckets - To ensure that data is not corrupted traversing the network, for objects where the ETag is the MD5 digest of the object, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory buckets - The ETag for the object in a directory bucket isn't the MD5 digest of the object. public let eTag: String? /// If the expiration is configured for the object (see PutBucketLifecycleConfiguration) in the Amazon S3 User Guide, the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL-encoded. Object expiration information is not returned in directory buckets and this header returns the value "NotImplemented" in all responses for directory buckets. @@ -8069,13 +8174,13 @@ public let requestCharged: RequestCharged? /// The server-side encryption algorithm used when you store this object in Amazon S3. public let serverSideEncryption: ServerSideEncryption? - /// The size of the object in bytes. This will only be present if you append to an object. This functionality is only supported for objects in the Amazon S3 Express One Zone storage class in directory buckets. + /// The size of the object in bytes. This value is only present if you append to an object. This functionality is only supported for objects in the Amazon S3 Express One Zone storage class in directory buckets. public let size: Int64? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. This functionality is not supported for directory buckets. public let sseCustomerAlgorithm: String? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object.
+ /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. public let ssekmsEncryptionContext: String? /// If present, indicates the ID of the KMS key that was used for object encryption. public let ssekmsKeyId: String? @@ -8083,12 +8188,14 @@ extension S3 { public let versionId: String? @inlinable - public init(bucketKeyEnabled: Bool? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, eTag: String? = nil, expiration: String? = nil, requestCharged: RequestCharged? = nil, serverSideEncryption: ServerSideEncryption? = nil, size: Int64? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsEncryptionContext: String? = nil, ssekmsKeyId: String? = nil, versionId: String? = nil) { + public init(bucketKeyEnabled: Bool? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, checksumType: ChecksumType? = nil, eTag: String? = nil, expiration: String? = nil, requestCharged: RequestCharged? = nil, serverSideEncryption: ServerSideEncryption? = nil, size: Int64? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsEncryptionContext: String? = nil, ssekmsKeyId: String? = nil, versionId: String? = nil) { self.bucketKeyEnabled = bucketKeyEnabled self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 + self.checksumType = checksumType self.eTag = eTag self.expiration = expiration self.requestCharged = requestCharged @@ -8106,8 +8213,10 @@ extension S3 { self.bucketKeyEnabled = try response.decodeHeaderIfPresent(Bool.self, key: "x-amz-server-side-encryption-bucket-key-enabled") self.checksumCRC32 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc32") self.checksumCRC32C = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc32c") + self.checksumCRC64NVME = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc64nvme") self.checksumSHA1 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-sha1") self.checksumSHA256 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-sha256") + self.checksumType = try response.decodeHeaderIfPresent(ChecksumType.self, key: "x-amz-checksum-type") self.eTag = try response.decodeHeaderIfPresent(String.self, key: "ETag") self.expiration = try response.decodeHeaderIfPresent(String.self, key: "x-amz-expiration") self.requestCharged = try response.decodeHeaderIfPresent(RequestCharged.self, key: "x-amz-request-charged") @@ -8137,15 +8246,17 @@ extension S3 { public let bucketKeyEnabled: Bool? /// Can be used to specify caching behavior along the request/reply chain. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9. public let cacheControl: String? - /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. 
When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. + /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC-32 CRC-32C CRC-64NVME SHA-1 SHA-256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 fails the request with a BadDigest error. The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. public let checksumAlgorithm: ChecksumAlgorithm? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32C checksum of the object. 
For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME checksum of the object. The CRC-64NVME checksum is always a full object checksum. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? /// Specifies presentational information for the object. For more information, see https://www.rfc-editor.org/rfc/rfc6266#section-4. public let contentDisposition: String? @@ -8155,7 +8266,7 @@ extension S3 { public let contentLanguage: String? /// Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length. public let contentLength: Int64? - /// The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication. The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. This functionality is not supported for directory buckets. + /// The Base64 encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication. The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. 
For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. This functionality is not supported for directory buckets. public let contentMD5: String? /// A standard MIME type describing the format of the contents. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type. public let contentType: String? @@ -8197,7 +8308,7 @@ extension S3 { public let sseCustomerKey: String? /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. + /// Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. public let ssekmsEncryptionContext: String? /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. 
If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. /// The Amazon Web Services managed key (aws/s3) isn't supported. @@ -8212,7 +8323,7 @@ extension S3 { public let writeOffsetBytes: Int64? @inlinable - public init(acl: ObjectCannedACL? = nil, body: AWSHTTPBody? = nil, bucket: String, bucketKeyEnabled: Bool? = nil, cacheControl: String? = nil, checksumAlgorithm: ChecksumAlgorithm? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, contentDisposition: String? = nil, contentEncoding: String? = nil, contentLanguage: String? = nil, contentLength: Int64? = nil, contentMD5: String? = nil, contentType: String? = nil, expectedBucketOwner: String? = nil, expires: Date? = nil, grantFullControl: String? = nil, grantRead: String? = nil, grantReadACP: String? = nil, grantWriteACP: String? = nil, ifMatch: String? = nil, ifNoneMatch: String? = nil, key: String, metadata: [String: String]? = nil, objectLockLegalHoldStatus: ObjectLockLegalHoldStatus? = nil, objectLockMode: ObjectLockMode? = nil, objectLockRetainUntilDate: Date? = nil, requestPayer: RequestPayer? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKey: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsEncryptionContext: String? = nil, ssekmsKeyId: String? = nil, storageClass: StorageClass? = nil, tagging: String? = nil, websiteRedirectLocation: String? = nil, writeOffsetBytes: Int64? = nil) { + public init(acl: ObjectCannedACL? = nil, body: AWSHTTPBody? = nil, bucket: String, bucketKeyEnabled: Bool? = nil, cacheControl: String? = nil, checksumAlgorithm: ChecksumAlgorithm? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, contentDisposition: String? = nil, contentEncoding: String? = nil, contentLanguage: String? = nil, contentLength: Int64? = nil, contentMD5: String? = nil, contentType: String? = nil, expectedBucketOwner: String? = nil, expires: Date? = nil, grantFullControl: String? = nil, grantRead: String? = nil, grantReadACP: String? = nil, grantWriteACP: String? = nil, ifMatch: String? = nil, ifNoneMatch: String? = nil, key: String, metadata: [String: String]? = nil, objectLockLegalHoldStatus: ObjectLockLegalHoldStatus? = nil, objectLockMode: ObjectLockMode? = nil, objectLockRetainUntilDate: Date? = nil, requestPayer: RequestPayer? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKey: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsEncryptionContext: String? = nil, ssekmsKeyId: String? = nil, storageClass: StorageClass? = nil, tagging: String? = nil, websiteRedirectLocation: String? = nil, writeOffsetBytes: Int64? 
= nil) { self.acl = acl self.body = body self.bucket = bucket @@ -8221,6 +8332,7 @@ extension S3 { self.checksumAlgorithm = checksumAlgorithm self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 self.contentDisposition = contentDisposition @@ -8266,6 +8378,7 @@ extension S3 { request.encodeHeader(self.checksumAlgorithm, key: "x-amz-sdk-checksum-algorithm") request.encodeHeader(self.checksumCRC32, key: "x-amz-checksum-crc32") request.encodeHeader(self.checksumCRC32C, key: "x-amz-checksum-crc32c") + request.encodeHeader(self.checksumCRC64NVME, key: "x-amz-checksum-crc64nvme") request.encodeHeader(self.checksumSHA1, key: "x-amz-checksum-sha1") request.encodeHeader(self.checksumSHA256, key: "x-amz-checksum-sha256") request.encodeHeader(self.contentDisposition, key: "Content-Disposition") @@ -9662,13 +9775,15 @@ extension S3 { public struct UploadPartOutput: AWSDecodableShape { /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). public let bucketKeyEnabled: Bool? - /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object.
When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// The Base64 encoded, 160-bit SHA-1 digest of the object. This checksum is only present if the checksum was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The Base64 encoded, 256-bit SHA-256 digest of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? /// Entity tag for the uploaded object. public let eTag: String? @@ -9683,10 +9798,11 @@ extension S3 { public let ssekmsKeyId: String? @inlinable - public init(bucketKeyEnabled: Bool? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, eTag: String? = nil, requestCharged: RequestCharged? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsKeyId: String? = nil) { + public init(bucketKeyEnabled: Bool? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, eTag: String? = nil, requestCharged: RequestCharged? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsKeyId: String?
= nil) { self.bucketKeyEnabled = bucketKeyEnabled self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 self.eTag = eTag @@ -9702,6 +9818,7 @@ extension S3 { self.bucketKeyEnabled = try response.decodeHeaderIfPresent(Bool.self, key: "x-amz-server-side-encryption-bucket-key-enabled") self.checksumCRC32 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc32") self.checksumCRC32C = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc32c") + self.checksumCRC64NVME = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-crc64nvme") self.checksumSHA1 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-sha1") self.checksumSHA256 = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum-sha256") self.eTag = try response.decodeHeaderIfPresent(String.self, key: "ETag") @@ -9724,17 +9841,19 @@ extension S3 { public let bucket: String /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must be the same for all parts and it must match the checksum value supplied in the CreateMultipartUpload request. public let checksumAlgorithm: ChecksumAlgorithm? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME checksum of the part.
For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA256: String? /// Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. public let contentLength: Int64? - /// The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameter is required if object lock parameters are specified. This functionality is not supported for directory buckets. + /// The Base64 encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameter is required if object lock parameters are specified. This functionality is not supported for directory buckets. public let contentMD5: String? /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). public let expectedBucketOwner: String? @@ -9753,12 +9872,13 @@ extension S3 { public let uploadId: String @inlinable - public init(body: AWSHTTPBody? = nil, bucket: String, checksumAlgorithm: ChecksumAlgorithm? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, contentLength: Int64? = nil, contentMD5: String? = nil, expectedBucketOwner: String? = nil, key: String, partNumber: Int, requestPayer: RequestPayer? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKey: String? = nil, sseCustomerKeyMD5: String? = nil, uploadId: String) { + public init(body: AWSHTTPBody? = nil, bucket: String, checksumAlgorithm: ChecksumAlgorithm? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, contentLength: Int64? = nil, contentMD5: String? = nil, expectedBucketOwner: String? = nil, key: String, partNumber: Int, requestPayer: RequestPayer? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKey: String? = nil, sseCustomerKeyMD5: String? 
= nil, uploadId: String) { self.body = body self.bucket = bucket self.checksumAlgorithm = checksumAlgorithm self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 self.contentLength = contentLength @@ -9781,6 +9901,7 @@ extension S3 { request.encodeHeader(self.checksumAlgorithm, key: "x-amz-sdk-checksum-algorithm") request.encodeHeader(self.checksumCRC32, key: "x-amz-checksum-crc32") request.encodeHeader(self.checksumCRC32C, key: "x-amz-checksum-crc32c") + request.encodeHeader(self.checksumCRC64NVME, key: "x-amz-checksum-crc64nvme") request.encodeHeader(self.checksumSHA1, key: "x-amz-checksum-sha1") request.encodeHeader(self.checksumSHA256, key: "x-amz-checksum-sha256") request.encodeHeader(self.contentLength, key: "Content-Length") @@ -9864,13 +9985,15 @@ extension S3 { public let bucketKeyEnabled: Bool? /// Specifies caching behavior along the request/reply chain. public let cacheControl: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 32-bit CRC-32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. public let checksumCRC32: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 32-bit CRC-32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. 
For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. public let checksumCRC32C: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the Base64 encoded, 64-bit CRC-64NVME checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide. + public let checksumCRC64NVME: String? + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. public let checksumSHA1: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 256-bit SHA-256 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the Base64 encoded, 256-bit SHA-256 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. public let checksumSHA256: String? /// Specifies presentational information for the object. public let contentDisposition: String? @@ -9940,13 +10063,14 @@ extension S3 { public let versionId: String? @inlinable - public init(acceptRanges: String? = nil, body: AWSHTTPBody? = nil, bucketKeyEnabled: Bool? 
= nil, cacheControl: String? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, contentDisposition: String? = nil, contentEncoding: String? = nil, contentLanguage: String? = nil, contentLength: Int64? = nil, contentRange: String? = nil, contentType: String? = nil, deleteMarker: Bool? = nil, errorCode: String? = nil, errorMessage: String? = nil, eTag: String? = nil, expiration: String? = nil, expires: Date? = nil, lastModified: Date? = nil, metadata: [String: String]? = nil, missingMeta: Int? = nil, objectLockLegalHoldStatus: ObjectLockLegalHoldStatus? = nil, objectLockMode: ObjectLockMode? = nil, objectLockRetainUntilDate: Date? = nil, partsCount: Int? = nil, replicationStatus: ReplicationStatus? = nil, requestCharged: RequestCharged? = nil, requestRoute: String, requestToken: String, restore: String? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsKeyId: String? = nil, statusCode: Int? = nil, storageClass: StorageClass? = nil, tagCount: Int? = nil, versionId: String? = nil) { + public init(acceptRanges: String? = nil, body: AWSHTTPBody? = nil, bucketKeyEnabled: Bool? = nil, cacheControl: String? = nil, checksumCRC32: String? = nil, checksumCRC32C: String? = nil, checksumCRC64NVME: String? = nil, checksumSHA1: String? = nil, checksumSHA256: String? = nil, contentDisposition: String? = nil, contentEncoding: String? = nil, contentLanguage: String? = nil, contentLength: Int64? = nil, contentRange: String? = nil, contentType: String? = nil, deleteMarker: Bool? = nil, errorCode: String? = nil, errorMessage: String? = nil, eTag: String? = nil, expiration: String? = nil, expires: Date? = nil, lastModified: Date? = nil, metadata: [String: String]? = nil, missingMeta: Int? = nil, objectLockLegalHoldStatus: ObjectLockLegalHoldStatus? = nil, objectLockMode: ObjectLockMode? = nil, objectLockRetainUntilDate: Date? = nil, partsCount: Int? = nil, replicationStatus: ReplicationStatus? = nil, requestCharged: RequestCharged? = nil, requestRoute: String, requestToken: String, restore: String? = nil, serverSideEncryption: ServerSideEncryption? = nil, sseCustomerAlgorithm: String? = nil, sseCustomerKeyMD5: String? = nil, ssekmsKeyId: String? = nil, statusCode: Int? = nil, storageClass: StorageClass? = nil, tagCount: Int? = nil, versionId: String? 
= nil) { self.acceptRanges = acceptRanges self.body = body self.bucketKeyEnabled = bucketKeyEnabled self.cacheControl = cacheControl self.checksumCRC32 = checksumCRC32 self.checksumCRC32C = checksumCRC32C + self.checksumCRC64NVME = checksumCRC64NVME self.checksumSHA1 = checksumSHA1 self.checksumSHA256 = checksumSHA256 self.contentDisposition = contentDisposition @@ -9992,6 +10116,7 @@ extension S3 { request.encodeHeader(self.cacheControl, key: "x-amz-fwd-header-Cache-Control") request.encodeHeader(self.checksumCRC32, key: "x-amz-fwd-header-x-amz-checksum-crc32") request.encodeHeader(self.checksumCRC32C, key: "x-amz-fwd-header-x-amz-checksum-crc32c") + request.encodeHeader(self.checksumCRC64NVME, key: "x-amz-fwd-header-x-amz-checksum-crc64nvme") request.encodeHeader(self.checksumSHA1, key: "x-amz-fwd-header-x-amz-checksum-sha1") request.encodeHeader(self.checksumSHA256, key: "x-amz-fwd-header-x-amz-checksum-sha256") request.encodeHeader(self.contentDisposition, key: "x-amz-fwd-header-Content-Disposition") diff --git a/Sources/Soto/Services/SESv2/SESv2_shapes.swift b/Sources/Soto/Services/SESv2/SESv2_shapes.swift index dc4f347072..cee5466674 100644 --- a/Sources/Soto/Services/SESv2/SESv2_shapes.swift +++ b/Sources/Soto/Services/SESv2/SESv2_shapes.swift @@ -283,6 +283,7 @@ extension SESv2 { public enum RecommendationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case bimi = "BIMI" + case complaint = "COMPLAINT" case dkim = "DKIM" case dmarc = "DMARC" case spf = "SPF" diff --git a/Sources/Soto/Services/SNS/SNS_api.swift b/Sources/Soto/Services/SNS/SNS_api.swift index 76c7ebc9ea..4dd1ae407c 100644 --- a/Sources/Soto/Services/SNS/SNS_api.swift +++ b/Sources/Soto/Services/SNS/SNS_api.swift @@ -316,7 +316,7 @@ public struct SNS: AWSService { /// Creates a topic to which notifications can be published. Users can create at most 100,000 standard topics (at most 1,000 FIFO topics). For more information, see Creating an Amazon SNS topic in the Amazon SNS Developer Guide. This action is idempotent, so if the requester already owns a topic with the specified name, that topic's ARN is returned without creating a new topic. /// /// Parameters: - /// - attributes: A map of attributes with their corresponding values. The following lists names, descriptions, and values of the special request parameters that the CreateTopic action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. FifoTopic – Set to true to create a FIFO topic. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. 
The following attribute applies only to server-side encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. The following attributes apply only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. + /// - attributes: A map of attributes with their corresponding values. The following lists names, descriptions, and values of the special request parameters that the CreateTopic action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. FifoTopic – Set to true to create a FIFO topic. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. The following attribute applies only to server-side encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. The following attributes apply only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. FifoThroughputScope – Enables higher throughput for your FIFO topic by adjusting the scope of deduplication. 
This attribute has two possible values: Topic – The scope of message deduplication is across the entire topic. This is the default value and maintains existing behavior, with a maximum throughput of 3000 messages per second or 20MB per second, whichever comes first. MessageGroup – The scope of deduplication is within each individual message group, which enables higher throughput per topic subject to regional quotas. For more information on quotas or to request an increase, see Amazon SNS service quotas in the Amazon Web Services General Reference. /// - dataProtectionPolicy: The body of the policy document you want to use for this topic. You can only add one policy per topic. The policy must be in JSON string format. Length Constraints: Maximum length of 30,720. /// - name: The name of the topic you want to create. Constraints: Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long. For a FIFO (first-in-first-out) topic, the name must end with the .fifo suffix. /// - tags: The list of tags to add to a new topic. To be able to tag a topic on creation, you must have the sns:CreateTopic and sns:TagResource permissions. @@ -974,7 +974,7 @@ public struct SNS: AWSService { /// Parameters: /// - message: The message you want to send. If you are publishing to a topic and you want to send the same message to all transport protocols, include the text of the message as a String value. If you want to send different messages for each transport protocol, set the value of the MessageStructure parameter to json and use a JSON object for the Message parameter. Constraints: With the exception of SMS, messages must be UTF-8 encoded strings and at most 256 KB in size (262,144 bytes, not 262,144 characters). For SMS, each message can contain up to 140 characters. This character limit depends on the encoding schema. For example, an SMS message can contain 160 GSM characters, 140 ASCII characters, or 70 UCS-2 characters. If you publish a message that exceeds this size limit, Amazon SNS sends the message as multiple messages, each fitting within the size limit. Messages aren't truncated mid-word but are cut off at whole-word boundaries. The total size limit for a single SMS Publish action is 1,600 characters. JSON-specific constraints: Keys in the JSON object that correspond to supported transport protocols must have simple JSON string values. The values will be parsed (unescaped) before they are used in outgoing messages. Outbound notifications are JSON encoded (meaning that the characters will be reescaped for sending). Values have a minimum length of 0 (the empty string, "", is allowed). Values have a maximum length bounded by the overall message size (so, including multiple protocols may limit message sizes). Non-string values will cause the key to be ignored. Keys that do not correspond to supported transport protocols are ignored. Duplicate keys are not allowed. Failure to parse or validate any key or value in the message will cause the Publish call to return an error (no partial delivery). /// - messageAttributes: Message attributes for Publish action. - /// - messageDeduplicationId: This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). Every message must have a unique MessageDeduplicationId, which is a token used for deduplication of sent messages. 
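The FifoThroughputScope attribute documented above is set at topic creation like any other CreateTopic attribute. A minimal sketch, assuming an already-configured AWSClient named client; the topic name is hypothetical:

import SotoSNS

// Sketch only: creates a FIFO topic whose deduplication scope is each
// individual message group, per the FifoThroughputScope attribute above.
func createHighThroughputFifoTopic(client: AWSClient) async throws -> String? {
    let sns = SNS(client: client, region: .useast1)
    let input = SNS.CreateTopicInput(
        attributes: [
            "FifoTopic": "true",
            "ContentBasedDeduplication": "true",
            "FifoThroughputScope": "MessageGroup" // or "Topic" (the default)
        ],
        name: "orders.fifo" // hypothetical; FIFO topic names must end in .fifo
    )
    let response = try await sns.createTopic(input)
    return response.topicArn
}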
If a message with a particular MessageDeduplicationId is sent successfully, any message sent with the same MessageDeduplicationId during the 5-minute deduplication interval is treated as a duplicate. If the topic has ContentBasedDeduplication set, the system generates a MessageDeduplicationId based on the contents of the message. Your MessageDeduplicationId overrides the generated one. + /// - messageDeduplicationId: This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). Every message must have a unique MessageDeduplicationId, which is a token used for deduplication of sent messages within the 5-minute minimum deduplication interval. The scope of deduplication depends on the FifoThroughputScope attribute: when set to Topic, the message deduplication scope is across the entire topic; when set to MessageGroup, the message deduplication scope is within each individual message group. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages within the deduplication scope and interval, with the same MessageDeduplicationId, are accepted successfully but aren't delivered. Every message must have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the topic doesn't have ContentBasedDeduplication set, the action fails with an error. If the topic has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication scope and interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled, and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates, within the deduplication scope and interval, and only one copy of the message is delivered. /// - messageGroupId: This parameter applies only to FIFO (first-in-first-out) topics. The MessageGroupId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). The MessageGroupId is a tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). Every message must include a MessageGroupId. /// - messageStructure: Set MessageStructure to json if you want to send a different message for each protocol. For example, using one publish action, you can send a short message to your SMS subscribers and a longer message to your email subscribers. If you set MessageStructure to json, the value of the Message parameter must: be a syntactically valid JSON object; and contain at least a top-level JSON key of "default" with a value that is a string. You can define other top-level keys that define the message you want to send to a specific transport protocol (e.g., "http").
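For the FIFO deduplication behavior documented above, a minimal publish sketch — assuming an already-configured AWSClient named client; the topic ARN and identifiers are hypothetical:

import SotoSNS

// Sketch only: publishes to a FIFO topic with an explicit deduplication
// token and message group. Messages sharing a messageGroupId are delivered
// in order; a repeated messageDeduplicationId within the deduplication
// scope and interval is accepted but not redelivered.
func publishOrderEvent(client: AWSClient) async throws {
    let sns = SNS(client: client, region: .useast1)
    let input = SNS.PublishInput(
        message: "order shipped",
        messageDeduplicationId: "order-1234-shipped", // unique per message
        messageGroupId: "order-1234",                 // the ordering unit
        topicArn: "arn:aws:sns:us-east-1:111122223333:orders.fifo"
    )
    let response = try await sns.publish(input)
    print(response.messageId ?? "no message id")
}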
Valid value: json /// - phoneNumber: The phone number to which you want to deliver an SMS message. Use E.164 format. If you don't specify a value for the PhoneNumber parameter, you must specify a value for the TargetArn or TopicArn parameters. @@ -1249,7 +1249,7 @@ public struct SNS: AWSService { /// Allows a topic owner to set an attribute of the topic to a new value. To remove the ability to change topic permissions, you must deny permissions to the AddPermission, RemovePermission, and SetTopicAttributes actions in your IAM policy. /// /// Parameters: - /// - attributeName: A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses: ApplicationSuccessFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a platform application endpoint. DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. HTTP HTTPSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint. Amazon Kinesis Data Firehose FirehoseSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. Lambda LambdaSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Lambda endpoint. LambdaSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Lambda endpoint. LambdaFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Lambda endpoint. Platform application endpoint ApplicationSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. ApplicationSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. 
ApplicationFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. In addition to being able to configure topic attributes for message delivery status of notification messages sent to Amazon SNS application endpoints, you can also configure application attributes for the delivery status of push notification messages sent to push notification services. For example, For more information, see Using Amazon SNS Application Attributes for Message Delivery Status. Amazon SQS SQSSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. SQSSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. SQSFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. The SuccessFeedbackRoleArn and FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to use CloudWatch Logs on your behalf. The SuccessFeedbackSampleRate attribute is for specifying the sample rate percentage (0-100) of successfully delivered messages. After you configure the FailureFeedbackRoleArn attribute, then all failed message deliveries generate CloudWatch Logs. The following attribute applies only to server-side-encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. The following attribute applies only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. + /// - attributeName: A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses: ApplicationSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to a platform application endpoint. DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. TracingConfig – Tracing mode of an Amazon SNS topic.
By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. HTTP HTTPSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint. Amazon Kinesis Data Firehose FirehoseSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. Lambda LambdaSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint. LambdaSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to a Lambda endpoint. LambdaFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint. Platform application endpoint ApplicationSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. ApplicationSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. ApplicationFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. In addition to being able to configure topic attributes for message delivery status of notification messages sent to Amazon SNS application endpoints, you can also configure application attributes for the delivery status of push notification messages sent to push notification services. For more information, see Using Amazon SNS Application Attributes for Message Delivery Status. Amazon SQS SQSSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. SQSSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. SQSFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. The SuccessFeedbackRoleArn and FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to use CloudWatch Logs on your behalf. The SuccessFeedbackSampleRate attribute is for specifying the sample rate percentage (0-100) of successfully delivered messages. After you configure the FailureFeedbackRoleArn attribute, all failed message deliveries generate CloudWatch Logs.
The following attribute applies only to server-side-encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. The following attribute applies only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. FifoThroughputScope – Enables higher throughput for your FIFO topic by adjusting the scope of deduplication. This attribute has two possible values: Topic – The scope of message deduplication is across the entire topic. This is the default value and maintains existing behavior, with a maximum throughput of 3000 messages per second or 20MB per second, whichever comes first. MessageGroup – The scope of deduplication is within each individual message group, which enables higher throughput per topic subject to regional quotas. For more information on quotas or to request an increase, see Amazon SNS service quotas in the Amazon Web Services General Reference. /// - attributeValue: The new value for the attribute. /// - topicArn: The ARN of the topic to modify. /// - logger: Logger use during operation diff --git a/Sources/Soto/Services/SNS/SNS_shapes.swift b/Sources/Soto/Services/SNS/SNS_shapes.swift index 6995d47288..65fe5901d6 100644 --- a/Sources/Soto/Services/SNS/SNS_shapes.swift +++ b/Sources/Soto/Services/SNS/SNS_shapes.swift @@ -289,7 +289,7 @@ extension SNS { } public struct CreateTopicInput: AWSEncodableShape { - /// A map of attributes with their corresponding values. The following lists names, descriptions, and values of the special request parameters that the CreateTopic action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. FifoTopic – Set to true to create a FIFO topic. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. 
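The SetTopicAttributes parameters above take one attribute name and value per call. A minimal sketch, assuming an already-configured AWSClient named client and a hypothetical topic ARN:

import SotoSNS

// Sketch only: switches an existing FIFO topic to per-message-group
// deduplication scope via the FifoThroughputScope attribute above.
func enableMessageGroupScope(client: AWSClient) async throws {
    let sns = SNS(client: client, region: .useast1)
    let input = SNS.SetTopicAttributesInput(
        attributeName: "FifoThroughputScope",
        attributeValue: "MessageGroup",
        topicArn: "arn:aws:sns:us-east-1:111122223333:orders.fifo"
    )
    try await sns.setTopicAttributes(input)
}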
If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. The following attribute applies only to server-side encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. The following attributes apply only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. + /// A map of attributes with their corresponding values. The following lists names, descriptions, and values of the special request parameters that the CreateTopic action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. FifoTopic – Set to true to create a FIFO topic. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. The following attribute applies only to server-side encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. The following attributes apply only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. 
FifoThroughputScope – Enables higher throughput for your FIFO topic by adjusting the scope of deduplication. This attribute has two possible values: Topic – The scope of message deduplication is across the entire topic. This is the default value and maintains existing behavior, with a maximum throughput of 3000 messages per second or 20MB per second, whichever comes first. MessageGroup – The scope of deduplication is within each individual message group, which enables higher throughput per topic subject to regional quotas. For more information on quotas or to request an increase, see Amazon SNS service quotas in the Amazon Web Services General Reference. @OptionalCustomCoding> public var attributes: [String: String]? /// The body of the policy document you want to use for this topic. You can only add one policy per topic. The policy must be in JSON string format. Length Constraints: Maximum length of 30,720. @@ -1063,7 +1063,7 @@ extension SNS { /// Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SNS message attributes in the Amazon SNS Developer Guide. @OptionalCustomCoding> public var messageAttributes: [String: MessageAttributeValue]? - /// This parameter applies only to FIFO (first-in-first-out) topics. The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages with the same MessageDeduplicationId are accepted successfully but aren't delivered. Every message must have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the topic doesn't have ContentBasedDeduplication set, the action fails with an error. If the topic has a ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled, and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered. The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues). If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SNS can't detect duplicate messages. Amazon SNS continues to keep track of the message deduplication ID even after the message is received and deleted. The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). + /// This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~).
Every message must have a unique MessageDeduplicationId, which is a token used for deduplication of sent messages within the 5-minute minimum deduplication interval. The scope of deduplication depends on the FifoThroughputScope attribute: when set to Topic, the message deduplication scope is across the entire topic; when set to MessageGroup, the message deduplication scope is within each individual message group. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages within the deduplication scope and interval, with the same MessageDeduplicationId, are accepted successfully but aren't delivered. Every message must have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the topic doesn't have ContentBasedDeduplication set, the action fails with an error. If the topic has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication scope and interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled, and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates, within the deduplication scope and interval, and only one copy of the message is delivered. The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues). If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SNS can't detect duplicate messages. Amazon SNS continues to keep track of the message deduplication ID even after the message is received and deleted. public let messageDeduplicationId: String? /// This parameter applies only to FIFO (first-in-first-out) topics. The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single topic, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the topic, but the session data of each user is processed in a FIFO fashion. You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails. The length of MessageGroupId is 128 characters. MessageGroupId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). MessageGroupId is required for FIFO topics. You can't use it for standard topics. public let messageGroupId: String? @@ -1148,7 +1148,7 @@ extension SNS { /// Message attributes for Publish action. @OptionalCustomCoding> public var messageAttributes: [String: MessageAttributeValue]? - /// This parameter applies only to FIFO (first-in-first-out) topics.
The MessageDeduplicationId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). Every message must have a unique MessageDeduplicationId, which is a token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any message sent with the same MessageDeduplicationId during the 5-minute deduplication interval is treated as a duplicate. If the topic has ContentBasedDeduplication set, the system generates a MessageDeduplicationId based on the contents of the message. Your MessageDeduplicationId overrides the generated one. + /// This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). Every message must have a unique MessageDeduplicationId, which is a token used for deduplication of sent messages within the 5-minute minimum deduplication interval. The scope of deduplication depends on the FifoThroughputScope attribute: when set to Topic, the message deduplication scope is across the entire topic; when set to MessageGroup, the message deduplication scope is within each individual message group. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages within the deduplication scope and interval, with the same MessageDeduplicationId, are accepted successfully but aren't delivered. Every message must have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the topic doesn't have ContentBasedDeduplication set, the action fails with an error. If the topic has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication scope and interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled, and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates, within the deduplication scope and interval, and only one copy of the message is delivered. public let messageDeduplicationId: String? /// This parameter applies only to FIFO (first-in-first-out) topics. The MessageGroupId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). The MessageGroupId is a tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). Every message must include a MessageGroupId. public let messageGroupId: String? @@ -1341,7 +1341,7 @@ extension SNS { } public struct SetTopicAttributesInput: AWSEncodableShape { - /// A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses: ApplicationSuccessFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a platform application endpoint. DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. HTTP HTTPSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint. Amazon Kinesis Data Firehose FirehoseSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. Lambda LambdaSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Lambda endpoint. LambdaSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Lambda endpoint. LambdaFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Lambda endpoint. Platform application endpoint ApplicationSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. ApplicationSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. ApplicationFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. In addition to being able to configure topic attributes for message delivery status of notification messages sent to Amazon SNS application endpoints, you can also configure application attributes for the delivery status of push notification messages sent to push notification services. For example, For more information, see Using Amazon SNS Application Attributes for Message Delivery Status. Amazon SQS SQSSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. 
SQSSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. SQSFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. The SuccessFeedbackRoleArn and FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to use CloudWatch Logs on your behalf. The SuccessFeedbackSampleRate attribute is for specifying the sample rate percentage (0-100) of successfully delivered messages. After you configure the FailureFeedbackRoleArn attribute, then all failed message deliveries generate CloudWatch Logs. The following attribute applies only to server-side-encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. The following attribute applies only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. + /// A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses: ApplicationSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to a platform application endpoint. DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. HTTP HTTPSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint.
Amazon Kinesis Data Firehose FirehoseSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. Lambda LambdaSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint. LambdaSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to a Lambda endpoint. LambdaFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a Lambda endpoint. Platform application endpoint ApplicationSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. ApplicationSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. ApplicationFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. In addition to being able to configure topic attributes for message delivery status of notification messages sent to Amazon SNS application endpoints, you can also configure application attributes for the delivery status of push notification messages sent to push notification services. For more information, see Using Amazon SNS Application Attributes for Message Delivery Status. Amazon SQS SQSSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. SQSSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. SQSFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. The SuccessFeedbackRoleArn and FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to use CloudWatch Logs on your behalf. The SuccessFeedbackSampleRate attribute is for specifying the sample rate percentage (0-100) of successfully delivered messages. After you configure the FailureFeedbackRoleArn attribute, all failed message deliveries generate CloudWatch Logs. The following attribute applies only to server-side-encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. The following attribute applies only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic.
ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. FifoThroughputScope – Enables higher throughput for your FIFO topic by adjusting the scope of deduplication. This attribute has two possible values: Topic – The scope of message deduplication is across the entire topic. This is the default value and maintains existing behavior, with a maximum throughput of 3000 messages per second or 20MB per second, whichever comes first. MessageGroup – The scope of deduplication is within each individual message group, which enables higher throughput per topic subject to regional quotas. For more information on quotas or to request an increase, see Amazon SNS service quotas in the Amazon Web Services General Reference. public let attributeName: String /// The new value for the attribute. public let attributeValue: String? diff --git a/Sources/Soto/Services/SQS/SQS_api.swift b/Sources/Soto/Services/SQS/SQS_api.swift index 7afb64350c..4ebeb8fc02 100644 --- a/Sources/Soto/Services/SQS/SQS_api.swift +++ b/Sources/Soto/Services/SQS/SQS_api.swift @@ -169,7 +169,7 @@ public struct SQS: AWSService { return try await self.cancelMessageMoveTask(input, logger: logger) } - /// Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. For example, if the default timeout for a queue is 60 seconds, 15 seconds have elapsed since you received the message, and you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count from the time that you make the ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout or to delete that message 10 seconds after you initially change the visibility timeout (a total of 25 seconds) might result in an error. An Amazon SQS message has three basic states: Sent to a queue by a producer. Received from the queue by a consumer. Deleted from the queue. A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of in flight messages. Limits that apply to in flight messages are unrelated to the unlimited number of stored messages. For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. 
You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request. For FIFO queues, there can be a maximum of 20,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages. If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time. Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received. + /// Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. For example, if the default timeout for a queue is 60 seconds, 15 seconds have elapsed since you received the message, and you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count from the time that you make the ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout or to delete that message 10 seconds after you initially change the visibility timeout (a total of 25 seconds) might result in an error. An Amazon SQS message has three basic states: Sent to a queue by a producer. Received from the queue by a consumer. Deleted from the queue. A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of in flight messages. Limits that apply to in flight messages are unrelated to the unlimited number of stored messages. For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request. For FIFO queues, there can be a maximum of 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages. If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time. Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. 
If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received. @Sendable @inlinable public func changeMessageVisibility(_ input: ChangeMessageVisibilityRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -182,7 +182,7 @@ public struct SQS: AWSService { logger: logger ) } - /// Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. For example, if the default timeout for a queue is 60 seconds, 15 seconds have elapsed since you received the message, and you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count from the time that you make the ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout or to delete that message 10 seconds after you initially change the visibility timeout (a total of 25 seconds) might result in an error. An Amazon SQS message has three basic states: Sent to a queue by a producer. Received from the queue by a consumer. Deleted from the queue. A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of in flight messages. Limits that apply to in flight messages are unrelated to the unlimited number of stored messages. For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request. For FIFO queues, there can be a maximum of 20,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages. If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time. Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received. + /// Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. 
For example, if the default timeout for a queue is 60 seconds, 15 seconds have elapsed since you received the message, and you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count from the time that you make the ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout or to delete that message 10 seconds after you initially change the visibility timeout (a total of 25 seconds) might result in an error. An Amazon SQS message has three basic states: Sent to a queue by a producer. Received from the queue by a consumer. Deleted from the queue. A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of in flight messages. Limits that apply to in flight messages are unrelated to the unlimited number of stored messages. For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request. For FIFO queues, there can be a maximum of 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages. If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time. Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received. /// /// Parameters: /// - queueUrl: The URL of the Amazon SQS queue whose message's visibility is changed. Queue URLs and names are case-sensitive. @@ -236,7 +236,7 @@ public struct SQS: AWSService { return try await self.changeMessageVisibilityBatch(input, logger: logger) } - /// Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind: If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide. If you don't provide a value for an attribute, the queue is created with the default value for the attribute. If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name. 
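The ChangeMessageVisibility semantics documented above (the new timeout counts from the moment of the call and is not saved with the message) look like this with Soto. A minimal sketch, assuming a configured AWSClient and a placeholder queue URL:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)
let queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/work-queue" // placeholder

let received = try await sqs.receiveMessage(.init(maxNumberOfMessages: 1, queueUrl: queueUrl))
if let handle = received.messages?.first?.receiptHandle {
    // Processing is running long: hide the message for another 10 minutes,
    // counted from the moment this call is made.
    try await sqs.changeMessageVisibility(.init(
        queueUrl: queueUrl,
        receiptHandle: handle,
        visibilityTimeout: 600
    ))
}
try await client.shutdown()
```

Because the new timeout is not persisted with the message, a later receive of the same message reverts to the queue's configured timeout.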
To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues. After you create a queue, you must wait at least one second after the queue is created to be able to use the queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. be aware of existing queue names: If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue. If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error. Cross-account permissions don't apply to this action. For more information, + /// Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind: If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide. If you don't provide a value for an attribute, the queue is created with the default value for the attribute. If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name. To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues. After you create a queue, you must wait at least one second after the queue is created to be able to use the queue. To retrieve the URL of a queue, use the GetQueueUrl action. This action only requires the QueueName parameter. When creating queues, keep the following points in mind: If you specify the name of an existing queue and provide the exact same names and values for all its attributes, the CreateQueue action will return the URL of the existing queue instead of creating a new one. If you attempt to create a queue with a name that already exists but with different attribute names or values, the CreateQueue action will return an error. This ensures that existing queues are not inadvertently altered. Cross-account permissions don't apply to this action. For more information, /// see Grant /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. @Sendable @@ -251,7 +251,7 @@ public struct SQS: AWSService { logger: logger ) } - /// Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind: If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide. If you don't provide a value for an attribute, the queue is created with the default value for the attribute. If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name. 
To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues. After you create a queue, you must wait at least one second after the queue is created to be able to use the queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. be aware of existing queue names: If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue. If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error. Cross-account permissions don't apply to this action. For more information, + /// Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind: If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide. If you don't provide a value for an attribute, the queue is created with the default value for the attribute. If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name. To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues. After you create a queue, you must wait at least one second after the queue is created to be able to use the queue. To retrieve the URL of a queue, use the GetQueueUrl action. This action only requires the QueueName parameter. When creating queues, keep the following points in mind: If you specify the name of an existing queue and provide the exact same names and values for all its attributes, the CreateQueue action will return the URL of the existing queue instead of creating a new one. If you attempt to create a queue with a name that already exists but with different attribute names or values, the CreateQueue action will return an error. This ensures that existing queues are not inadvertently altered. Cross-account permissions don't apply to this action. For more information, /// see Grant /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. /// @@ -275,7 +275,7 @@ public struct SQS: AWSService { return try await self.createQueue(input, logger: logger) } - /// Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue. The ReceiptHandle is associated with a specific instance of receiving a message. If you receive a message more than once, the ReceiptHandle is different each time you receive a message. When you use the DeleteMessage action, you must provide the most recently received ReceiptHandle for the message (otherwise, the request succeeds, but the message will not be deleted). 
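The CreateQueue rules above (queue type fixed at creation, defaults for unspecified attributes, idempotent only when all attributes match) reduce to a single call. A sketch, assuming the generated request shape takes an attribute map keyed by QueueAttributeName, with an illustrative queue name:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)

// FifoQueue can only be set at creation time; the name must end in ".fifo".
// ContentBasedDeduplication lets senders omit MessageDeduplicationId.
let created = try await sqs.createQueue(.init(
    attributes: [
        .fifoQueue: "true",
        .contentBasedDeduplication: "true",
    ],
    queueName: "orders.fifo"
))
print(created.queueUrl ?? "no queue URL returned")
try await client.shutdown()
```

Repeating the call with identical attributes returns the same URL; reusing the name with different attribute values raises the QueueNameExists error instead of altering the existing queue.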
For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues. + /// Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue. Each time you receive a message, meaning when a consumer retrieves a message from the queue, it comes with a unique ReceiptHandle. If you receive the same message more than once, you will get a different ReceiptHandle each time. When you want to delete a message using the DeleteMessage action, you must use the ReceiptHandle from the most recent time you received the message. If you use an old ReceiptHandle, the request will succeed, but the message might not be deleted. For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues. @Sendable @inlinable public func deleteMessage(_ input: DeleteMessageRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -288,7 +288,7 @@ public struct SQS: AWSService { logger: logger ) } - /// Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue. The ReceiptHandle is associated with a specific instance of receiving a message. If you receive a message more than once, the ReceiptHandle is different each time you receive a message. When you use the DeleteMessage action, you must provide the most recently received ReceiptHandle for the message (otherwise, the request succeeds, but the message will not be deleted). For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues. + /// Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). 
Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue. Each time you receive a message, meaning when a consumer retrieves a message from the queue, it comes with a unique ReceiptHandle. If you receive the same message more than once, you will get a different ReceiptHandle each time. When you want to delete a message using the DeleteMessage action, you must use the ReceiptHandle from the most recent time you received the message. If you use an old ReceiptHandle, the request will succeed, but the message might not be deleted. For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues. /// /// Parameters: /// - queueUrl: The URL of the Amazon SQS queue from which messages are deleted. Queue URLs and names are case-sensitive. @@ -404,7 +404,7 @@ public struct SQS: AWSService { return try await self.getQueueAttributes(input, logger: logger) } - /// Returns the URL of an existing Amazon SQS queue. To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS Developer Guide. + /// The GetQueueUrl API returns the URL of an existing Amazon SQS queue. This is useful when you know the queue's name but need to retrieve its URL for further operations. To access a queue owned by another Amazon Web Services account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. Note that the queue owner must grant you the necessary permissions to access the queue. For more information about accessing shared queues, see the AddPermission API or Allow developers to write messages to a shared queue in the Amazon SQS Developer Guide. @Sendable @inlinable public func getQueueUrl(_ input: GetQueueUrlRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetQueueUrlResult { @@ -417,11 +417,11 @@ public struct SQS: AWSService { logger: logger ) } - /// Returns the URL of an existing Amazon SQS queue. To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS Developer Guide. + /// The GetQueueUrl API returns the URL of an existing Amazon SQS queue. This is useful when you know the queue's name but need to retrieve its URL for further operations. To access a queue owned by another Amazon Web Services account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. Note that the queue owner must grant you the necessary permissions to access the queue. 
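The ReceiptHandle rules for DeleteMessage above, as a receive-process-delete loop in Soto. A minimal sketch with a placeholder queue URL; the processing step is elided:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)
let queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/work-queue" // placeholder

let batch = try await sqs.receiveMessage(.init(maxNumberOfMessages: 10, queueUrl: queueUrl))
for message in batch.messages ?? [] {
    // ... process message.body ...
    // Delete with the handle from *this* receive; a handle from an earlier
    // receive would be accepted, but the message might not be deleted.
    if let handle = message.receiptHandle {
        try await sqs.deleteMessage(.init(queueUrl: queueUrl, receiptHandle: handle))
    }
}
try await client.shutdown()
```

Since a deleted message can still be redelivered on rare occasions, the processing step should be idempotent.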
For more information about accessing shared queues, see the AddPermission API or Allow developers to write messages to a shared queue in the Amazon SQS Developer Guide. /// /// Parameters: - /// - queueName: The name of the queue whose URL must be fetched. Maximum 80 characters. Valid values: alphanumeric characters, hyphens (-), and underscores (_). Queue URLs and names are case-sensitive. - /// - queueOwnerAWSAccountId: The Amazon Web Services account ID of the account that created the queue. + /// - queueName: (Required) The name of the queue for which you want to fetch the URL. The name can be up to 80 characters long and can include alphanumeric characters, hyphens (-), and underscores (_). Queue URLs and names are case-sensitive. + /// - queueOwnerAWSAccountId: (Optional) The Amazon Web Services account ID of the account that created the queue. This is only required when you are attempting to access a queue owned by another Amazon Web Services account. /// - logger: Logger use during operation @inlinable public func getQueueUrl( @@ -606,7 +606,7 @@ public struct SQS: AWSService { return try await self.purgeQueue(input, logger: logger) } - /// Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon SQS Developer Guide. Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Thus, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request. For each message returned, the response includes the following: The message body. An MD5 digest of the message body. For information about MD5, see RFC1321. The MessageId you received when you sent the message to the queue. The receipt handle. The message attributes. An MD5 digest of the message attributes. The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide. You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue. In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. + /// Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon SQS Developer Guide. Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Therefore, only the messages on the sampled machines are returned. 
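Both GetQueueUrl cases described above in one Soto sketch; the queue names and the owner account ID are placeholders:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)

// Same-account lookup: only QueueName is required.
let own = try await sqs.getQueueUrl(.init(queueName: "work-queue"))

// Cross-account lookup: the owner must first grant access (see AddPermission).
let shared = try await sqs.getQueueUrl(.init(
    queueName: "shared-queue",
    queueOwnerAWSAccountId: "123456789012"
))
print(own.queueUrl ?? "", shared.queueUrl ?? "")
try await client.shutdown()
```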
If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request. For each message returned, the response includes the following: The message body. An MD5 digest of the message body. For information about MD5, see RFC1321. The MessageId you received when you sent the message to the queue. The receipt handle. The message attributes. An MD5 digest of the message attributes. The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide. You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. The default visibility timeout for a queue is 30 seconds. In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. @Sendable @inlinable public func receiveMessage(_ input: ReceiveMessageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ReceiveMessageResult { @@ -619,7 +619,7 @@ public struct SQS: AWSService { logger: logger ) } - /// Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon SQS Developer Guide. Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Thus, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request. For each message returned, the response includes the following: The message body. An MD5 digest of the message body. For information about MD5, see RFC1321. The MessageId you received when you sent the message to the queue. The receipt handle. The message attributes. An MD5 digest of the message attributes. The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide. You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue. In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. 
+ /// Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon SQS Developer Guide. Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Therefore, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request. For each message returned, the response includes the following: The message body. An MD5 digest of the message body. For information about MD5, see RFC1321. The MessageId you received when you sent the message to the queue. The receipt handle. The message attributes. An MD5 digest of the message attributes. The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide. You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. The default visibility timeout for a queue is 30 seconds. In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. /// /// Parameters: /// - maxNumberOfMessages: The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values: 1 to 10. Default: 1. @@ -627,8 +627,8 @@ public struct SQS: AWSService { /// - messageSystemAttributeNames: A list of attributes that need to be returned along with each message. These attributes include: All – Returns all values. ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds). ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted. AWSTraceHeader – Returns the X-Ray trace header string. SenderId For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R. For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds). SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action. MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence. SequenceNumber – Returns the value provided by Amazon SQS. /// - queueUrl: The URL of the Amazon SQS queue from which messages are received. Queue URLs and names are case-sensitive. /// - receiveRequestAttemptId: This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of ReceiveMessage calls. 
If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, it is possible to retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired. You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action. When you set FifoQueue, a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly. It is possible to retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changed). During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. If a caller of the ReceiveMessage action still processes messages when the visibility timeout expires and messages become visible, another worker consuming from the same queue can receive the same messages and therefore process duplicates. Also, if a consumer whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error. To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary. While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible. If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId, no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order. The maximum length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon SQS Developer Guide. - /// - visibilityTimeout: The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request. - /// - waitTimeSeconds: The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds. If no messages are available and the wait time expires, the call does not return a message list. To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage requests is longer than the WaitTimeSeconds parameter. For example, with the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous clients. + /// - visibilityTimeout: The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request. If not specified, the default visibility timeout for the queue is used, which is 30 seconds. 
Understanding VisibilityTimeout: When a message is received from a queue, it becomes temporarily invisible to other consumers for the duration of the visibility timeout. This prevents multiple consumers from processing the same message simultaneously. If the message is not deleted or its visibility timeout is not extended before the timeout expires, it becomes visible again and can be retrieved by other consumers. Setting an appropriate visibility timeout is crucial. If it's too short, the message might become visible again before processing is complete, leading to duplicate processing. If it's too long, it delays the reprocessing of messages if the initial processing fails. You can adjust the visibility timeout using the --visibility-timeout parameter in the receive-message command to match the processing time required by your application. A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. + /// - waitTimeSeconds: The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds. If no messages are available and the wait time expires, the call does not return a message list. If you are using the Java SDK, it returns a ReceiveMessageResponse object, which has an empty list instead of a null object. To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage requests is longer than the WaitTimeSeconds parameter. For example, with the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous clients. /// - logger: Logger use during operation @inlinable public func receiveMessage( diff --git a/Sources/Soto/Services/SQS/SQS_shapes.swift b/Sources/Soto/Services/SQS/SQS_shapes.swift index ae095262fb..e039fdac33 100644 --- a/Sources/Soto/Services/SQS/SQS_shapes.swift +++ b/Sources/Soto/Services/SQS/SQS_shapes.swift @@ -425,9 +425,9 @@ extension SQS { } public struct GetQueueUrlRequest: AWSEncodableShape { - /// The name of the queue whose URL must be fetched. Maximum 80 characters. Valid values: alphanumeric characters, hyphens (-), and underscores (_). Queue URLs and names are case-sensitive. + /// (Required) The name of the queue for which you want to fetch the URL. The name can be up to 80 characters long and can include alphanumeric characters, hyphens (-), and underscores (_). Queue URLs and names are case-sensitive. public let queueName: String - /// The Amazon Web Services account ID of the account that created the queue. + /// (Optional) The Amazon Web Services account ID of the account that created the queue. This is only required when you are attempting to access a queue owned by another Amazon Web Services account. public let queueOwnerAWSAccountId: String? @inlinable @@ -758,7 +758,7 @@ extension SQS { } public struct ReceiveMessageRequest: AWSEncodableShape { - /// This parameter has been deprecated but will be supported for backward compatibility. To provide attribute names, you are encouraged to use MessageSystemAttributeNames. A list of attributes that need to be returned along with each message. These attributes include: All – Returns all values. 
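The ReceiveMessage parameters above combine into the usual long-polling loop. A sketch with illustrative values and a placeholder queue URL:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)
let queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/work-queue" // placeholder

let response = try await sqs.receiveMessage(.init(
    maxNumberOfMessages: 10, // upper bound; fewer, or none, may be returned
    queueUrl: queueUrl,
    visibilityTimeout: 120,  // per-request override of the 30-second queue default
    waitTimeSeconds: 20      // long poll for up to 20 seconds
))
// An empty batch is normal for near-empty or short-polled queues; poll again.
for message in response.messages ?? [] {
    print(message.messageId ?? "", message.body ?? "")
}
try await client.shutdown()
```

As the doc comment notes, the HTTP client's response timeout should exceed waitTimeSeconds, or long polls can surface as transport errors.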
ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds). ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted. AWSTraceHeader – Returns the X-Ray trace header string. SenderId For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R. For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds). SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action. MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence. SequenceNumber – Returns the value provided by Amazon SQS. + /// This parameter has been discontinued but will be supported for backward compatibility. To provide attribute names, you are encouraged to use MessageSystemAttributeNames. A list of attributes that need to be returned along with each message. These attributes include: All – Returns all values. ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds). ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted. AWSTraceHeader – Returns the X-Ray trace header string. SenderId For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R. For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds). SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action. MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence. SequenceNumber – Returns the value provided by Amazon SQS. public let attributeNames: [QueueAttributeName]? /// The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values: 1 to 10. Default: 1. public let maxNumberOfMessages: Int? @@ -770,9 +770,9 @@ extension SQS { public let queueUrl: String /// This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, it is possible to retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired. You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action. When you set FifoQueue, a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly. 
It is possible to retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changed). During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. If a caller of the ReceiveMessage action still processes messages when the visibility timeout expires and messages become visible, another worker consuming from the same queue can receive the same messages and therefore process duplicates. Also, if a consumer whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error. To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary. While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible. If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId, no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order. The maximum length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon SQS Developer Guide. public let receiveRequestAttemptId: String? - /// The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request. + /// The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request. If not specified, the default visibility timeout for the queue is used, which is 30 seconds. Understanding VisibilityTimeout: When a message is received from a queue, it becomes temporarily invisible to other consumers for the duration of the visibility timeout. This prevents multiple consumers from processing the same message simultaneously. If the message is not deleted or its visibility timeout is not extended before the timeout expires, it becomes visible again and can be retrieved by other consumers. Setting an appropriate visibility timeout is crucial. If it's too short, the message might become visible again before processing is complete, leading to duplicate processing. If it's too long, it delays the reprocessing of messages if the initial processing fails. You can adjust the visibility timeout using the --visibility-timeout parameter in the receive-message command to match the processing time required by your application. A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. public let visibilityTimeout: Int? 
- /// The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds. If no messages are available and the wait time expires, the call does not return a message list. To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage requests is longer than the WaitTimeSeconds parameter. For example, with the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous clients. + /// The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds. If no messages are available and the wait time expires, the call does not return a message list. If you are using the Java SDK, it returns a ReceiveMessageResponse object, which has an empty list instead of a null object. To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage requests is longer than the WaitTimeSeconds parameter. For example, with the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous clients. public let waitTimeSeconds: Int? @inlinable @@ -1180,7 +1180,7 @@ public struct SQSErrorType: AWSErrorType { public static var batchRequestTooLong: Self { .init(.batchRequestTooLong) } /// The batch request doesn't contain any entries. public static var emptyBatchRequest: Self { .init(.emptyBatchRequest) } - /// The accountId is invalid. + /// The specified ID is invalid. public static var invalidAddress: Self { .init(.invalidAddress) } /// The specified attribute doesn't exist. public static var invalidAttributeName: Self { .init(.invalidAttributeName) } @@ -1192,7 +1192,7 @@ public struct SQSErrorType: AWSErrorType { public static var invalidIdFormat: Self { .init(.invalidIdFormat) } /// The message contains characters outside the allowed set. public static var invalidMessageContents: Self { .init(.invalidMessageContents) } - /// When the request to a queue is not HTTPS and SigV4. + /// The request was not made over HTTPS or did not use SigV4 for signing. public static var invalidSecurity: Self { .init(.invalidSecurity) } /// The caller doesn't have the required KMS access. public static var kmsAccessDenied: Self { .init(.kmsAccessDenied) } @@ -1216,17 +1216,17 @@ public struct SQSErrorType: AWSErrorType { public static var purgeQueueInProgress: Self { .init(.purgeQueueInProgress) } /// You must wait 60 seconds after deleting a queue before you can create another queue with the same name. public static var queueDeletedRecently: Self { .init(.queueDeletedRecently) } - /// The specified queue doesn't exist. + /// Ensure that the QueueUrl is correct and that the queue has not been deleted. public static var queueDoesNotExist: Self { .init(.queueDoesNotExist) } /// A queue with this name already exists. Amazon SQS returns this error only if the request includes attributes whose values differ from those of the existing queue. public static var queueNameExists: Self { .init(.queueNameExists) } /// The specified receipt handle isn't valid. public static var receiptHandleIsInvalid: Self { .init(.receiptHandleIsInvalid) } - /// The request was denied due to request throttling. The rate of requests per second exceeds the Amazon Web Services KMS request quota for an account and Region. 
A burst or sustained high rate of requests to change the state of the same KMS key. This condition is often known as a "hot key." Requests for operations on KMS keys in a Amazon Web Services CloudHSM key store might be throttled at a lower-than-expected rate when the Amazon Web Services CloudHSM cluster associated with the Amazon Web Services CloudHSM key store is processing numerous commands, including those unrelated to the Amazon Web Services CloudHSM key store. + /// The request was denied due to request throttling. The request exceeds the permitted request rate for the queue or for the recipient of the request. Ensure that the request rate is within the Amazon SQS limits for sending messages. For more information, see Amazon SQS quotas in the Amazon SQS Developer Guide. public static var requestThrottled: Self { .init(.requestThrottled) } /// One or more specified resources don't exist. public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } - /// The batch request contains more entries than permissible. + /// The batch request contains more entries than permissible. For Amazon SQS, the maximum number of entries you can include in a single SendMessageBatch, DeleteMessageBatch, or ChangeMessageVisibilityBatch request is 10. public static var tooManyEntriesInBatchRequest: Self { .init(.tooManyEntriesInBatchRequest) } /// Error code 400. Unsupported operation. public static var unsupportedOperation: Self { .init(.unsupportedOperation) } diff --git a/Sources/Soto/Services/SSM/SSM_api.swift b/Sources/Soto/Services/SSM/SSM_api.swift index 7814915d36..0c7f191120 100644 --- a/Sources/Soto/Services/SSM/SSM_api.swift +++ b/Sources/Soto/Services/SSM/SSM_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS SSM service. /// -/// Amazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure end-to-end management solution for hybrid cloud environments that enables safe and secure operations at scale. This reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up Amazon Web Services Systems Manager. Related resources For information about each of the capabilities that comprise Systems Manager, see Systems Manager capabilities in the Amazon Web Services Systems Manager User Guide. For details about predefined runbooks for Automation, a capability of Amazon Web Services Systems Manager, see the Systems Manager Automation runbook reference . For information about AppConfig, a capability of Systems Manager, see the AppConfig User Guide and the AppConfig API Reference . For information about Incident Manager, a capability of Systems Manager, see the Systems Manager Incident Manager User Guide and the Systems Manager Incident Manager API Reference . +/// Amazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure end-to-end management solution for hybrid cloud environments that enables safe and secure operations at scale. This reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up Amazon Web Services Systems Manager. Related resources For information about each of the tools that comprise Systems Manager, see Using Systems Manager tools in the Amazon Web Services Systems Manager User Guide. 
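The reworded SQS error descriptions above correspond to static members of SQSErrorType, so callers can match on them. A sketch, assuming the generated error type supports equality comparison as Soto error types usually do:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)

do {
    _ = try await sqs.getQueueUrl(.init(queueName: "possibly-missing-queue"))
} catch let error as SQSErrorType where error == .queueDoesNotExist {
    // Check the queue name and whether the queue was recently deleted.
} catch let error as SQSErrorType where error == .requestThrottled {
    // Back off and retry: the permitted request rate was exceeded.
}
try await client.shutdown()
```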
For details about predefined runbooks for Automation, a tool in Amazon Web Services Systems Manager, see the Systems Manager Automation runbook reference. For information about AppConfig, a tool in Systems Manager, see the AppConfig User Guide and the AppConfig API Reference. For information about Incident Manager, a tool in Systems Manager, see the Systems Manager Incident Manager User Guide and the Systems Manager Incident Manager API Reference. public struct SSM: AWSService { // MARK: Member variables @@ -130,7 +130,7 @@ public struct SSM: AWSService { return try await self.addTagsToResource(input, logger: logger) } - /// Associates a related item to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager and OpsCenter are capabilities of Amazon Web Services Systems Manager. + /// Associates a related item to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager and OpsCenter are tools in Amazon Web Services Systems Manager. @Sendable @inlinable public func associateOpsItemRelatedItem(_ input: AssociateOpsItemRelatedItemRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateOpsItemRelatedItemResponse { @@ -143,7 +143,7 @@ logger: logger ) } - /// Associates a related item to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager and OpsCenter are capabilities of Amazon Web Services Systems Manager. + /// Associates a related item to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager and OpsCenter are tools in Amazon Web Services Systems Manager. /// /// Parameters: /// - associationType: The type of association that you want to create between an OpsItem and a resource. OpsCenter supports IsParentOf and RelatesTo association types. @@ -229,7 +229,7 @@ return try await self.cancelMaintenanceWindowExecution(input, logger: logger) } - /// Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes. + /// Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager tools. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. 
For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes. @Sendable @inlinable public func createActivation(_ input: CreateActivationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateActivationResult { @@ -242,7 +242,7 @@ public struct SSM: AWSService { logger: logger ) } - /// Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes. + /// Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager tools. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes. /// /// Parameters: /// - defaultInstanceName: The name of the registered, managed node as it will appear in the Amazon Web Services Systems Manager console or when you use the Amazon Web Services command line tools to list Systems Manager resources. Don't enter personally identifiable information in this field. @@ -276,7 +276,7 @@ public struct SSM: AWSService { return try await self.createActivation(input, logger: logger) } - /// A State Manager association defines the state that you want to maintain on your managed nodes. For example, an association can specify that anti-virus software must be installed and running on your managed nodes, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a capability of Amazon Web Services Systems Manager applies the configuration when new managed nodes are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software isn't installed, then State Manager installs it. 
If the software is installed, but the service isn't running, then the association might instruct State Manager to start the service. + /// A State Manager association defines the state that you want to maintain on your managed nodes. For example, an association can specify that anti-virus software must be installed and running on your managed nodes, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a tool in Amazon Web Services Systems Manager, applies the configuration when new managed nodes are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software isn't installed, then State Manager installs it. If the software is installed, but the service isn't running, then the association might instruct State Manager to start the service. @Sendable @inlinable public func createAssociation(_ input: CreateAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAssociationResult { @@ -289,13 +289,13 @@ logger: logger ) } - /// A State Manager association defines the state that you want to maintain on your managed nodes. For example, an association can specify that anti-virus software must be installed and running on your managed nodes, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a capability of Amazon Web Services Systems Manager applies the configuration when new managed nodes are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software isn't installed, then State Manager installs it. If the software is installed, but the service isn't running, then the association might instruct State Manager to start the service. + /// A State Manager association defines the state that you want to maintain on your managed nodes. For example, an association can specify that anti-virus software must be installed and running on your managed nodes, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a tool in Amazon Web Services Systems Manager, applies the configuration when new managed nodes are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software isn't installed, then State Manager installs it. If the software is installed, but the service isn't running, then the association might instruct State Manager to start the service. /// /// Parameters: /// - alarmConfiguration: /// - applyOnlyAtCronInterval: By default, when you create a new association, the system runs it immediately after it is created and then according to the schedule you specified.
Specify this option if you don't want an association to run immediately after you create it. This parameter isn't supported for rate expressions. /// - associationName: Specify a descriptive name for the association. - /// - automationTargetParameterName: Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager. + /// - automationTargetParameterName: Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager. /// - calendarNames: The names or Amazon Resource Names (ARNs) of the Change Calendar type documents you want to gate your associations under. The associations only run when that change calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar. /// - complianceSeverity: The severity level to assign to the association. /// - documentVersion: The document version you want to associate with the targets. Can be a specific version or the default version. State Manager doesn't support running associations that use a new version of a document if that document is shared from another account. State Manager always runs the default version of a document if shared from another account, even though the Systems Manager console shows that a new version was processed. If you want to run an association using a new version of a document shared from another account, you must set the document version to default. @@ -2460,7 +2460,7 @@ public struct SSM: AWSService { return try await self.describeSessions(input, logger: logger) } - /// Deletes the association between an OpsItem and a related item. For example, this API operation can delete an Incident Manager incident from an OpsItem. Incident Manager is a capability of Amazon Web Services Systems Manager. + /// Deletes the association between an OpsItem and a related item. For example, this API operation can delete an Incident Manager incident from an OpsItem. Incident Manager is a tool in Amazon Web Services Systems Manager. @Sendable @inlinable public func disassociateOpsItemRelatedItem(_ input: DisassociateOpsItemRelatedItemRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateOpsItemRelatedItemResponse { @@ -2473,7 +2473,7 @@ logger: logger ) } - /// Deletes the association between an OpsItem and a related item. For example, this API operation can delete an Incident Manager incident from an OpsItem. Incident Manager is a capability of Amazon Web Services Systems Manager. + /// Deletes the association between an OpsItem and a related item. For example, this API operation can delete an Incident Manager incident from an OpsItem. Incident Manager is a tool in Amazon Web Services Systems Manager. /// /// Parameters: /// - associationId: The ID of the association for which you want to delete an association between the OpsItem and a related item. @@ -2521,7 +2521,7 @@ return try await self.getAutomationExecution(input, logger: logger) } - /// Gets the state of an Amazon Web Services Systems Manager change calendar at the current time or a specified time.
If you specify a time, GetCalendarState returns the state of the calendar at that specific time, and returns the next time that the change calendar state will transition. If you don't specify a time, GetCalendarState uses the current time. Change Calendar entries have two possible states: OPEN or CLOSED. If you specify more than one calendar in a request, the command returns the status of OPEN only if all calendars in the request are open. If one or more calendars in the request are closed, the status returned is CLOSED. For more information about Change Calendar, a capability of Amazon Web Services Systems Manager, see Amazon Web Services Systems Manager Change Calendar in the Amazon Web Services Systems Manager User Guide. + /// Gets the state of an Amazon Web Services Systems Manager change calendar at the current time or a specified time. If you specify a time, GetCalendarState returns the state of the calendar at that specific time, and returns the next time that the change calendar state will transition. If you don't specify a time, GetCalendarState uses the current time. Change Calendar entries have two possible states: OPEN or CLOSED. If you specify more than one calendar in a request, the command returns the status of OPEN only if all calendars in the request are open. If one or more calendars in the request are closed, the status returned is CLOSED. For more information about Change Calendar, a tool in Amazon Web Services Systems Manager, see Amazon Web Services Systems Manager Change Calendar in the Amazon Web Services Systems Manager User Guide. @Sendable @inlinable public func getCalendarState(_ input: GetCalendarStateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetCalendarStateResponse { @@ -2534,7 +2534,7 @@ logger: logger ) } - /// Gets the state of an Amazon Web Services Systems Manager change calendar at the current time or a specified time. If you specify a time, GetCalendarState returns the state of the calendar at that specific time, and returns the next time that the change calendar state will transition. If you don't specify a time, GetCalendarState uses the current time. Change Calendar entries have two possible states: OPEN or CLOSED. If you specify more than one calendar in a request, the command returns the status of OPEN only if all calendars in the request are open. If one or more calendars in the request are closed, the status returned is CLOSED. For more information about Change Calendar, a capability of Amazon Web Services Systems Manager, see Amazon Web Services Systems Manager Change Calendar in the Amazon Web Services Systems Manager User Guide. + /// Gets the state of an Amazon Web Services Systems Manager change calendar at the current time or a specified time. If you specify a time, GetCalendarState returns the state of the calendar at that specific time, and returns the next time that the change calendar state will transition. If you don't specify a time, GetCalendarState uses the current time. Change Calendar entries have two possible states: OPEN or CLOSED. If you specify more than one calendar in a request, the command returns the status of OPEN only if all calendars in the request are open. If one or more calendars in the request are closed, the status returned is CLOSED. For more information about Change Calendar, a tool in Amazon Web Services Systems Manager, see Amazon Web Services Systems Manager Change Calendar in the Amazon Web Services Systems Manager User Guide.
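A minimal sketch of the GetCalendarState behavior just described; the calendar document name is a placeholder, and omitting atTime queries the current time:

```swift
import SotoSSM

/// Returns true only if every calendar in the request is OPEN, matching
/// the all-calendars-open rule in the documentation above.
func isChangeWindowOpen(ssm: SSM, calendarName: String) async throws -> Bool {
    let response = try await ssm.getCalendarState(
        SSM.GetCalendarStateRequest(calendarNames: [calendarName])
    )
    return response.state == .open
}
```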
/// /// Parameters: /// - atTime: (Optional) The specific time for which you want to get calendar state information, in ISO 8601 format. If you don't specify a value for AtTime, the current time is used. @@ -2646,7 +2646,7 @@ return try await self.getDefaultPatchBaseline(input, logger: logger) } - /// Retrieves the current snapshot for the patch baseline the managed node uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document (SSM document). If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a capability of Amazon Web Services Systems Manager, with an SSM document that enables you to target a managed node with a script or command. For example, run the command using the AWS-RunShellScript document or the AWS-RunPowerShellScript document. + /// Retrieves the current snapshot for the patch baseline the managed node uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document (SSM document). If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a tool in Amazon Web Services Systems Manager, with an SSM document that enables you to target a managed node with a script or command. For example, run the command using the AWS-RunShellScript document or the AWS-RunPowerShellScript document. @Sendable @inlinable public func getDeployablePatchSnapshotForInstance(_ input: GetDeployablePatchSnapshotForInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDeployablePatchSnapshotForInstanceResult { @@ -2659,7 +2659,7 @@ logger: logger ) } - /// Retrieves the current snapshot for the patch baseline the managed node uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document (SSM document). If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a capability of Amazon Web Services Systems Manager, with an SSM document that enables you to target a managed node with a script or command. For example, run the command using the AWS-RunShellScript document or the AWS-RunPowerShellScript document. + /// Retrieves the current snapshot for the patch baseline the managed node uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document (SSM document). If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a tool in Amazon Web Services Systems Manager, with an SSM document that enables you to target a managed node with a script or command. For example, run the command using the AWS-RunShellScript document or the AWS-RunPowerShellScript document.
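A sketch of the Run Command pattern the note above recommends, using the predefined AWS-RunShellScript document; the instance ID and shell command are placeholders:

```swift
import SotoSSM

/// Target a managed node with a shell script via Run Command and return
/// the command ID for later status lookups.
func runShellScript(ssm: SSM, instanceId: String) async throws -> String? {
    let result = try await ssm.sendCommand(
        SSM.SendCommandRequest(
            documentName: "AWS-RunShellScript",
            instanceIds: [instanceId],
            parameters: ["commands": ["uptime"]]  // placeholder command
        )
    )
    return result.command?.commandId
}
```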
/// /// Parameters: /// - baselineOverride: Defines the basic information about a patch baseline override. @@ -3200,7 +3200,7 @@ public struct SSM: AWSService { return try await self.getParameters(input, logger: logger) } - /// Retrieve information about one or more parameters in a specific hierarchy. Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results. + /// Retrieve information about one or more parameters under a specified level in a hierarchy. Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results. @Sendable @inlinable public func getParametersByPath(_ input: GetParametersByPathRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetParametersByPathResult { @@ -3213,7 +3213,7 @@ public struct SSM: AWSService { logger: logger ) } - /// Retrieve information about one or more parameters in a specific hierarchy. Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results. + /// Retrieve information about one or more parameters under a specified level in a hierarchy. Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results. /// /// Parameters: /// - maxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results. @@ -3439,7 +3439,7 @@ public struct SSM: AWSService { return try await self.listAssociationVersions(input, logger: logger) } - /// Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You can limit the results to a specific State Manager association document or managed node by specifying a filter. State Manager is a capability of Amazon Web Services Systems Manager. + /// Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. 
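The best-effort paging contract described above for GetParametersByPath (zero to MaxResults items per page, plus a NextToken) can be handled with a simple token loop; a sketch, noting that Soto also generates paginator helpers for operations like this:

```swift
import SotoSSM

/// Collect every parameter under a hierarchy path, following NextToken
/// until the service stops returning one.
func parameters(under path: String, ssm: SSM) async throws -> [SSM.Parameter] {
    var collected: [SSM.Parameter] = []
    var token: String?
    repeat {
        let page = try await ssm.getParametersByPath(
            SSM.GetParametersByPathRequest(nextToken: token, path: path, recursive: true)
        )
        collected += page.parameters ?? []
        token = page.nextToken
    } while token != nil
    return collected
}
```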
You can limit the results to a specific State Manager association document or managed node by specifying a filter. State Manager is a tool in Amazon Web Services Systems Manager. @Sendable @inlinable public func listAssociations(_ input: ListAssociationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAssociationsResult { @@ -3452,7 +3452,7 @@ logger: logger ) } - /// Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You can limit the results to a specific State Manager association document or managed node by specifying a filter. State Manager is a capability of Amazon Web Services Systems Manager. + /// Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You can limit the results to a specific State Manager association document or managed node by specifying a filter. State Manager is a tool in Amazon Web Services Systems Manager. /// /// Parameters: /// - associationFilterList: One or more filters. Use a filter to return a more specific list of results. Filtering associations using the InstanceID attribute only returns legacy associations created using the InstanceID attribute. Associations targeting the managed node that are part of the Target Attributes ResourceGroup or Tags aren't returned. @@ -3904,7 +3904,7 @@ return try await self.listOpsItemEvents(input, logger: logger) } - /// Lists all related-item resources associated with a Systems Manager OpsCenter OpsItem. OpsCenter is a capability of Amazon Web Services Systems Manager. + /// Lists all related-item resources associated with a Systems Manager OpsCenter OpsItem. OpsCenter is a tool in Amazon Web Services Systems Manager. @Sendable @inlinable public func listOpsItemRelatedItems(_ input: ListOpsItemRelatedItemsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListOpsItemRelatedItemsResponse { @@ -3917,7 +3917,7 @@ logger: logger ) } - /// Lists all related-item resources associated with a Systems Manager OpsCenter OpsItem. OpsCenter is a capability of Amazon Web Services Systems Manager. + /// Lists all related-item resources associated with a Systems Manager OpsCenter OpsItem. OpsCenter is a tool in Amazon Web Services Systems Manager. /// /// Parameters: /// - filters: One or more OpsItem filters. Use a filter to return a more specific list of results. @@ -4221,11 +4221,11 @@ /// - keyId: The Key Management Service (KMS) ID that you want to use to encrypt a parameter. Use a custom key for better security. Required for parameters that use the SecureString data type. If you don't specify a key ID, the system uses the default key associated with your Amazon Web Services account, which is not as secure as using a custom key. To use a custom KMS key, choose the SecureString data type with the Key ID parameter. /// - name: The fully qualified name of the parameter that you want to add to the system. You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself. The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13 Naming Constraints: Parameter names are case sensitive.
A parameter name must be unique within an Amazon Web Services Region. A parameter name can't be prefixed with "aws" or "ssm" (case-insensitive). Parameter names can include only the following symbols and letters: a-zA-Z0-9_.- In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter. A parameter name can't include spaces. Parameter hierarchies are limited to a maximum depth of fifteen levels. For additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide. The maximum length constraint of 2048 characters listed below includes 1037 characters reserved for internal use by Systems Manager. The maximum length for a parameter name that you create is 1011 characters. This includes the characters in the ARN that precede the name you specify, such as arn:aws:ssm:us-east-2:111122223333:parameter/. /// - overwrite: Overwrite an existing parameter. The default value is false. - /// - policies: One or more policies to apply to a parameter. This operation takes a JSON array. Parameter Store, a capability of Amazon Web Services Systems Manager supports the following policy types: Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter doesn't affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter. ExpirationNotification: This policy initiates an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours. NoChangeNotification: This policy initiates a CloudWatch Events event if a parameter hasn't been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it hasn't been changed. All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Assigning parameter policies. + /// - policies: One or more policies to apply to a parameter. This operation takes a JSON array. Parameter Store, a tool in Amazon Web Services Systems Manager, supports the following policy types: Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter doesn't affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter. ExpirationNotification: This policy initiates an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours. NoChangeNotification: This policy initiates a CloudWatch Events event if a parameter hasn't been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it hasn't been changed. All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Assigning parameter policies.
/// - tags: Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a Systems Manager parameter to identify the type of resource to which it applies, the environment, or the type of configuration data referenced by the parameter. In this case, you could specify the following key-value pairs: Key=Resource,Value=S3bucket Key=OS,Value=Windows Key=ParameterType,Value=LicenseKey To add tags to an existing Systems Manager parameter, use the AddTagsToResource operation. /// - tier: The parameter tier to assign to a parameter. Parameter Store offers a standard tier and an advanced tier for parameters. Standard parameters have a content size limit of 4 KB and can't be configured to use parameter policies. You can create a maximum of 10,000 standard parameters for each Region in an Amazon Web Services account. Standard parameters are offered at no additional cost. Advanced parameters have a content size limit of 8 KB and can be configured to use parameter policies. You can create a maximum of 100,000 advanced parameters for each Region in an Amazon Web Services account. Advanced parameters incur a charge. For more information, see Managing parameter tiers in the Amazon Web Services Systems Manager User Guide. You can change a standard parameter to an advanced parameter any time. But you can't revert an advanced parameter to a standard parameter. Reverting an advanced parameter to a standard parameter would result in data loss because the system would truncate the size of the parameter from 8 KB to 4 KB. Reverting would also remove any policies attached to the parameter. Lastly, advanced parameters use a different form of encryption than standard parameters. If you no longer need an advanced parameter, or if you no longer want to incur charges for an advanced parameter, you must delete it and recreate it as a new standard parameter. Using the Default Tier Configuration In PutParameter requests, you can specify the tier to create the parameter in. Whenever you specify a tier in the request, Parameter Store creates or updates the parameter according to that request. However, if you don't specify a tier in a request, Parameter Store assigns the tier based on the current Parameter Store default tier configuration. The default tier when you begin using Parameter Store is the standard-parameter tier. If you use the advanced-parameter tier, you can specify one of the following as the default: Advanced: With this option, Parameter Store evaluates all requests as advanced parameters. Intelligent-Tiering: With this option, Parameter Store evaluates each request to determine if the parameter is standard or advanced. If the request doesn't include any options that require an advanced parameter, the parameter is created in the standard-parameter tier. If one or more options requiring an advanced parameter are included in the request, Parameter Store creates a parameter in the advanced-parameter tier. This approach helps control your parameter-related costs by always creating standard parameters unless an advanced parameter is necessary. Options that require an advanced parameter include the following: The content size of the parameter is more than 4 KB. The parameter uses a parameter policy. More than 10,000 parameters already exist in your Amazon Web Services account in the current Amazon Web Services Region.
For more information about configuring the default tier option, see Specifying a default parameter tier in the Amazon Web Services Systems Manager User Guide. /// - type: The type of parameter that you want to add to the system. SecureString isn't currently supported for CloudFormation templates. Items in a StringList must be separated by a comma (,). You can't use other punctuation or special characters to escape items in the list. If you have a parameter value that requires a comma, then use the String data type. Specifying a parameter type isn't required when updating a parameter. You must specify a parameter type when creating a parameter. - /// - value: The parameter value that you want to add to the system. Standard parameters have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB. Parameters can't be referenced or nested in the values of other parameters. You can't include {{}} or {{ssm:parameter-name}} in a parameter value. + /// - value: The parameter value that you want to add to the system. Standard parameters have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB. Parameters can't be referenced or nested in the values of other parameters. You can't include values wrapped in double brackets {{}} or {{ssm:parameter-name}} in a parameter value. /// - logger: Logger used during operation @inlinable public func putParameter(
/// - parameters: A key-value map of execution parameters, which match the declared parameters in the Automation runbook. - /// - tags: Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key-value pairs: Key=environment,Value=test Key=OS,Value=Windows To add tags to an existing automation, use the AddTagsToResource operation. + /// - tags: Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key-value pairs: Key=environment,Value=test Key=OS,Value=Windows The Array Members maximum value is reported as 1000. This number includes capacity reserved for internal operations. When calling the StartAutomationExecution action, you can specify a maximum of 5 tags. You can, however, use the AddTagsToResource action to add up to a total of 50 tags to an existing automation configuration. /// - targetLocations: A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple Amazon Web Services accounts. For more information, see Running automations in multiple Amazon Web Services Regions and accounts in the Amazon Web Services Systems Manager User Guide. /// - targetLocationsURL: Specify a publicly accessible URL for a file that contains the TargetLocations body. Currently, only files in presigned Amazon S3 buckets are supported. /// - targetMaps: A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together. @@ -4809,7 +4809,7 @@ public struct SSM: AWSService { /// - runbooks: Information about the Automation runbooks that are run during the runbook workflow. The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received. /// - scheduledEndTime: The time that the requester expects the runbook workflow related to the change request to complete. The time is an estimate only that the requester provides for reviewers. /// - scheduledTime: The date and time specified in the change request to run the Automation runbooks. The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received. - /// - tags: Optional metadata that you assign to a resource. You can specify a maximum of five tags for a change request. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a change request to identify an environment or target Amazon Web Services Region. In this case, you could specify the following key-value pairs: Key=Environment,Value=Production Key=Region,Value=us-east-2 + /// - tags: Optional metadata that you assign to a resource. You can specify a maximum of five tags for a change request. 
Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a change request to identify an environment or target Amazon Web Services Region. In this case, you could specify the following key-value pairs: Key=Environment,Value=Production Key=Region,Value=us-east-2 The Array Members maximum value is reported as 1000. This number includes capacity reserved for internal operations. When calling the StartChangeRequestExecution action, you can specify a maximum of 5 tags. You can, however, use the AddTagsToResource action to add up to a total of 50 tags to an existing change request configuration. /// - logger: Logger used during operation @inlinable public func startChangeRequestExecution( @@ -4894,7 +4894,7 @@ /// /// Parameters: /// - documentName: The name of the SSM document you want to use to define the type of session, input parameters, or preferences for the session. For example, SSM-SessionManagerRunShell. You can call the GetDocument API to verify the document exists before attempting to start a session. If no document name is provided, a shell to the managed node is launched by default. For more information, see Start a session in the Amazon Web Services Systems Manager User Guide. - /// - parameters: The values you want to specify for the parameters defined in the Session document. + /// - parameters: The values you want to specify for the parameters defined in the Session document. For more information about these parameters, see Create a Session Manager preferences document in the Amazon Web Services Systems Manager User Guide. /// - reason: The reason for connecting to the instance. This value is included in the details for the Amazon CloudWatch Events event created when you start the session. /// - target: The managed node to connect to for the session. /// - logger: Logger used during operation @@ -5032,7 +5032,7 @@ /// - associationId: The ID of the association you want to update. /// - associationName: The name of the association that you want to update. /// - associationVersion: This parameter is provided for concurrency control purposes. You must specify the latest association version in the service. If you want to ensure that this request succeeds, either specify $LATEST, or omit this parameter. - /// - automationTargetParameterName: Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager. + /// - automationTargetParameterName: Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager. /// - calendarNames: The names or Amazon Resource Names (ARNs) of the Change Calendar type documents you want to gate your associations under. The associations only run when that change calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar. /// - complianceSeverity: The severity level to assign to the association. /// - documentVersion: The document version you want to update for the association.
State Manager doesn't support running associations that use a new version of a document if that document is shared from another account. State Manager always runs the default version of a document if shared from another account, even though the Systems Manager console shows that a new version was processed. If you want to run an association using a new version of a document shared from another account, you must set the document version to default. @@ -5041,10 +5041,10 @@ /// - maxErrors: The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 managed nodes and set MaxError to 10%, then the system stops sending the request when the sixth error is received. Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time. /// - name: The name of the SSM Command document or Automation runbook that contains the configuration information for the managed node. You can specify Amazon Web Services-predefined documents, documents you created, or a document that is shared with you from another account. For Systems Manager documents (SSM documents) that are shared with you from other Amazon Web Services accounts, you must specify the complete SSM document ARN, in the following format: arn:aws:ssm:region:account-id:document/document-name For example: arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document For Amazon Web Services-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, AWS-ApplyPatchBaseline or My-Document. /// - outputLocation: An S3 bucket where you want to store the results of this request. - /// - parameters: The parameters you want to update for the association. If you create a parameter using Parameter Store, a capability of Amazon Web Services Systems Manager, you can reference the parameter using {{ssm:parameter-name}}. + /// - parameters: The parameters you want to update for the association. If you create a parameter using Parameter Store, a tool in Amazon Web Services Systems Manager, you can reference the parameter using {{ssm:parameter-name}}. /// - scheduleExpression: The cron expression used to schedule the association that you want to update. /// - scheduleOffset: Number of days to wait after the scheduled day to run an association. For example, if you specified a cron schedule of cron(0 0 ? * THU#2 *), you could specify an offset of 3 to run the association each Sunday after the second Thursday of the month. For more information about cron schedules for associations, see Reference: Cron and rate expressions for Systems Manager in the Amazon Web Services Systems Manager User Guide. To use offsets, you must specify the ApplyOnlyAtCronInterval parameter. This option tells the system not to run an association immediately after you create it. - /// - syncCompliance: The mode for generating association compliance.
You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT. In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation. By default, all associations use AUTO mode. + /// - syncCompliance: The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT. In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation. By default, all associations use AUTO mode. /// - targetLocations: A location is a combination of Amazon Web Services Regions and Amazon Web Services accounts where you want to run the association. Use this action to update an association in multiple Regions and multiple accounts. /// - targetMaps: A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together. /// - targets: The targets of the association. diff --git a/Sources/Soto/Services/SSM/SSM_shapes.swift b/Sources/Soto/Services/SSM/SSM_shapes.swift index 1238abedc2..24452ffcd7 100644 --- a/Sources/Soto/Services/SSM/SSM_shapes.swift +++ b/Sources/Soto/Services/SSM/SSM_shapes.swift @@ -1142,7 +1142,7 @@ extension SSM { public let associationName: String? /// The association version. public let associationVersion: String? - /// Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager. + /// Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager. public let automationTargetParameterName: String? /// The names or Amazon Resource Names (ARNs) of the Change Calendar type documents your associations are gated under. The associations only run when that change calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar. public let calendarNames: [String]? @@ -1180,7 +1180,7 @@ extension SSM { public let scheduleOffset: Int? /// The association status. public let status: AssociationStatus? - /// The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT. 
In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation. By default, all associations use AUTO mode. + /// The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT. In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation. By default, all associations use AUTO mode. public let syncCompliance: AssociationSyncCompliance? /// The combination of Amazon Web Services Regions and Amazon Web Services accounts where you want to run the association. public let targetLocations: [TargetLocation]? @@ -1509,7 +1509,7 @@ extension SSM { public let scheduleExpression: String? /// Number of days to wait after the scheduled day to run an association. public let scheduleOffset: Int? - /// The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT. In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation. By default, all associations use AUTO mode. + /// The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT. In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation. By default, all associations use AUTO mode. public let syncCompliance: AssociationSyncCompliance? /// The combination of Amazon Web Services Regions and Amazon Web Services accounts where you wanted to run the association when this association version was created. public let targetLocations: [TargetLocation]? @@ -2230,7 +2230,7 @@ extension SSM { public let parameters: [String: [String]]? /// The date and time the command was requested. public let requestedDateTime: Date? - /// The Identity and Access Management (IAM) service role that Run Command, a capability of Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes. 
+ /// The Identity and Access Management (IAM) service role that Run Command, a tool in Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes. public let serviceRole: String? /// The status of the command. public let status: CommandStatus? @@ -2349,7 +2349,7 @@ extension SSM { public let notificationConfig: NotificationConfig? /// The time and date the request was sent to this managed node. public let requestedDateTime: Date? - /// The Identity and Access Management (IAM) service role that Run Command, a capability of Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes on a per managed node basis. + /// The Identity and Access Management (IAM) service role that Run Command, a tool in Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes on a per managed node basis. public let serviceRole: String? /// The URL to the plugin's StdErr file in Amazon Simple Storage Service (Amazon S3), if the S3 bucket was defined for the parent command. For an invocation, StandardErrorUrl is populated if there is just one plugin defined for the command, and the S3 bucket was defined for the command. public let standardErrorUrl: String? @@ -2740,7 +2740,7 @@ extension SSM { public let applyOnlyAtCronInterval: Bool? /// Specify a descriptive name for the association. public let associationName: String? - /// Specify the target for the association. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager. + /// Specify the target for the association. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager. public let automationTargetParameterName: String? /// The names or Amazon Resource Names (ARNs) of the Change Calendar type documents your associations are gated under. The associations only run when that Change Calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar. public let calendarNames: [String]? @@ -2766,7 +2766,7 @@ extension SSM { public let scheduleExpression: String? /// Number of days to wait after the scheduled day to run an association. public let scheduleOffset: Int? - /// The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT. In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation. By default, all associations use AUTO mode. + /// The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT. 
In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation. By default, all associations use AUTO mode. public let syncCompliance: AssociationSyncCompliance? /// Use this action to create an association in multiple Regions and multiple accounts. public let targetLocations: [TargetLocation]? @@ -2884,7 +2884,7 @@ extension SSM { public let applyOnlyAtCronInterval: Bool? /// Specify a descriptive name for the association. public let associationName: String? - /// Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager. + /// Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager. public let automationTargetParameterName: String? /// The names or Amazon Resource Names (ARNs) of the Change Calendar type documents you want to gate your associations under. The associations only run when that change calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar. public let calendarNames: [String]? @@ -5662,7 +5662,7 @@ extension SSM { public let instancesWithOtherNonCompliantPatches: Int? /// The number of managed nodes where patches that are specified as Security in a patch advisory aren't installed. These patches might be missing, have failed installation, were rejected, or were installed but awaiting a required managed node reboot. The status of these managed nodes is NON_COMPLIANT. public let instancesWithSecurityNonCompliantPatches: Int? - /// The number of managed nodes with NotApplicable patches beyond the supported limit, which aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager. + /// The number of managed nodes with NotApplicable patches beyond the supported limit, which aren't reported by name to Inventory. Inventory is a tool in Amazon Web Services Systems Manager. public let instancesWithUnreportedNotApplicablePatches: Int? @inlinable @@ -8390,7 +8390,7 @@ extension SSM { public let securityNonCompliantCount: Int? /// The ID of the patch baseline snapshot used during the patching operation when this compliance data was collected. public let snapshotId: String? - /// The number of patches beyond the supported limit of NotApplicableCount that aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager. + /// The number of patches beyond the supported limit of NotApplicableCount that aren't reported by name to Inventory. Inventory is a tool in Amazon Web Services Systems Manager. public let unreportedNotApplicableCount: Int? @inlinable @@ -11531,7 +11531,7 @@ extension SSM { public let policyStatus: String? /// The JSON text of the policy. public let policyText: String? - /// The type of policy. Parameter Store, a capability of Amazon Web Services Systems Manager, supports the following policy types: Expiration, ExpirationNotification, and NoChangeNotification. + /// The type of policy. 
Parameter Store, a tool in Amazon Web Services Systems Manager, supports the following policy types: Expiration, ExpirationNotification, and NoChangeNotification. public let policyType: String? @inlinable @@ -12219,7 +12219,7 @@ extension SSM { public let name: String /// Overwrite an existing parameter. The default value is false. public let overwrite: Bool? - /// One or more policies to apply to a parameter. This operation takes a JSON array. Parameter Store, a capability of Amazon Web Services Systems Manager supports the following policy types: Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter doesn't affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter. ExpirationNotification: This policy initiates an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours. NoChangeNotification: This policy initiates a CloudWatch Events event if a parameter hasn't been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it hasn't been changed. All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Assigning parameter policies. + /// One or more policies to apply to a parameter. This operation takes a JSON array. Parameter Store, a tool in Amazon Web Services Systems Manager, supports the following policy types: Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter doesn't affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter. ExpirationNotification: This policy initiates an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours. NoChangeNotification: This policy initiates a CloudWatch Events event if a parameter hasn't been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it hasn't been changed. All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Assigning parameter policies. public let policies: String? /// Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a Systems Manager parameter to identify the type of resource to which it applies, the environment, or the type of configuration data referenced by the parameter. In this case, you could specify the following key-value pairs: Key=Resource,Value=S3bucket Key=OS,Value=Windows Key=ParameterType,Value=LicenseKey To add tags to an existing Systems Manager parameter, use the AddTagsToResource operation. public let tags: [Tag]? @@ -12227,7 +12227,7 @@ extension SSM { public let tier: ParameterTier? /// The type of parameter that you want to add to the system.
SecureString isn't currently supported for CloudFormation templates. Items in a StringList must be separated by a comma (,). You can't use other punctuation or special character to escape items in the list. If you have a parameter value that requires a comma, then use the String data type. Specifying a parameter type isn't required when updating a parameter. You must specify a parameter type when creating a parameter. public let type: ParameterType? - /// The parameter value that you want to add to the system. Standard parameters have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB. Parameters can't be referenced or nested in the values of other parameters. You can't include {{}} or {{ssm:parameter-name}} in a parameter value. + /// The parameter value that you want to add to the system. Standard parameters have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB. Parameters can't be referenced or nested in the values of other parameters. You can't include values wrapped in double brackets {{}} or {{ssm:parameter-name}} in a parameter value. public let value: String @inlinable @@ -13295,7 +13295,7 @@ extension SSM { public struct SendCommandRequest: AWSEncodableShape { /// The CloudWatch alarm you want to apply to your command. public let alarmConfiguration: AlarmConfiguration? - /// Enables Amazon Web Services Systems Manager to send Run Command output to Amazon CloudWatch Logs. Run Command is a capability of Amazon Web Services Systems Manager. + /// Enables Amazon Web Services Systems Manager to send Run Command output to Amazon CloudWatch Logs. Run Command is a tool in Amazon Web Services Systems Manager. public let cloudWatchOutputConfig: CloudWatchOutputConfig? /// User-specified information about the command, such as a brief description of what the command should do. public let comment: String? @@ -13624,7 +13624,7 @@ extension SSM { public let mode: ExecutionMode? /// A key-value map of execution parameters, which match the declared parameters in the Automation runbook. public let parameters: [String: [String]]? - /// Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key-value pairs: Key=environment,Value=test Key=OS,Value=Windows To add tags to an existing automation, use the AddTagsToResource operation. + /// Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key-value pairs: Key=environment,Value=test Key=OS,Value=Windows The Array Members maximum value is reported as 1000. This number includes capacity reserved for internal operations. When calling the StartAutomationExecution action, you can specify a maximum of 5 tags. You can, however, use the AddTagsToResource action to add up to a total of 50 tags to an existing automation configuration. public let tags: [Tag]? /// A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the automation. 
Use this operation to start an automation in multiple Amazon Web Services Regions and multiple Amazon Web Services accounts. For more information, see Running automations in multiple Amazon Web Services Regions and accounts in the Amazon Web Services Systems Manager User Guide. public let targetLocations: [TargetLocation]? @@ -13751,7 +13751,7 @@ extension SSM { public let scheduledEndTime: Date? /// The date and time specified in the change request to run the Automation runbooks. The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received. public let scheduledTime: Date? - /// Optional metadata that you assign to a resource. You can specify a maximum of five tags for a change request. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a change request to identify an environment or target Amazon Web Services Region. In this case, you could specify the following key-value pairs: Key=Environment,Value=Production Key=Region,Value=us-east-2 + /// Optional metadata that you assign to a resource. You can specify a maximum of five tags for a change request. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a change request to identify an environment or target Amazon Web Services Region. In this case, you could specify the following key-value pairs: Key=Environment,Value=Production Key=Region,Value=us-east-2 The Array Members maximum value is reported as 1000. This number includes capacity reserved for internal operations. When calling the StartChangeRequestExecution action, you can specify a maximum of 5 tags. You can, however, use the AddTagsToResource action to add up to a total of 50 tags to an existing change request configuration. public let tags: [Tag]? @inlinable @@ -13871,7 +13871,7 @@ extension SSM { public struct StartSessionRequest: AWSEncodableShape { /// The name of the SSM document you want to use to define the type of session, input parameters, or preferences for the session. For example, SSM-SessionManagerRunShell. You can call the GetDocument API to verify the document exists before attempting to start a session. If no document name is provided, a shell to the managed node is launched by default. For more information, see Start a session in the Amazon Web Services Systems Manager User Guide. public let documentName: String? - /// The values you want to specify for the parameters defined in the Session document. + /// The values you want to specify for the parameters defined in the Session document. For more information about these parameters, see Create a Session Manager preferences document in the Amazon Web Services Systems Manager User Guide. public let parameters: [String: [String]]? /// The reason for connecting to the instance. This value is included in the details for the Amazon CloudWatch Events event created when you start the session. public let reason: String? @@ -14341,7 +14341,7 @@ extension SSM { public let associationName: String? /// This parameter is provided for concurrency control purposes. You must specify the latest association version in the service. If you want to ensure that this request succeeds, either specify $LATEST, or omit this parameter. public let associationVersion: String? - /// Choose the parameter that will define how your automation will branch out. 
This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager. + /// Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager. public let automationTargetParameterName: String? /// The names or Amazon Resource Names (ARNs) of the Change Calendar type documents you want to gate your associations under. The associations only run when that change calendar is open. For more information, see Amazon Web Services Systems Manager Change Calendar. public let calendarNames: [String]? @@ -14359,13 +14359,13 @@ extension SSM { public let name: String? /// An S3 bucket where you want to store the results of this request. public let outputLocation: InstanceAssociationOutputLocation? - /// The parameters you want to update for the association. If you create a parameter using Parameter Store, a capability of Amazon Web Services Systems Manager, you can reference the parameter using {{ssm:parameter-name}}. + /// The parameters you want to update for the association. If you create a parameter using Parameter Store, a tool in Amazon Web Services Systems Manager, you can reference the parameter using {{ssm:parameter-name}}. public let parameters: [String: [String]]? /// The cron expression used to schedule the association that you want to update. public let scheduleExpression: String? /// Number of days to wait after the scheduled day to run an association. For example, if you specified a cron schedule of cron(0 0 ? * THU#2 *), you could specify an offset of 3 to run the association each Sunday after the second Thursday of the month. For more information about cron schedules for associations, see Reference: Cron and rate expressions for Systems Manager in the Amazon Web Services Systems Manager User Guide. To use offsets, you must specify the ApplyOnlyAtCronInterval parameter. This option tells the system not to run an association immediately after you create it. public let scheduleOffset: Int? - /// The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT. In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation. By default, all associations use AUTO mode. + /// The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT. In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. 
It is managed by your direct call to the PutComplianceItems API operation. By default, all associations use AUTO mode. public let syncCompliance: AssociationSyncCompliance? /// A location is a combination of Amazon Web Services Regions and Amazon Web Services accounts where you want to run the association. Use this action to update an association in multiple Regions and multiple accounts. public let targetLocations: [TargetLocation]?
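Before the diff moves on to SSOOIDC, a short usage sketch may help ground the Parameter Store policy text above. This is a minimal illustration, not part of the package: it assumes an already-configured AWSClient named client, the generated SSM.PutParameterRequest initializer shown in these hunks, and an invented parameter name, value, and expiration timestamp. Note that parameter policies such as Expiration apply only to advanced-tier parameters.

import SotoSSM

// Minimal sketch: create a SecureString parameter with an Expiration policy.
// `client` and every name, value, and date here are illustrative assumptions.
func storeExpiringSecret(client: AWSClient) async throws -> Int64? {
    let ssm = SSM(client: client, region: .useast1)
    // Parameter policies are supplied as a JSON array, as documented above.
    let expirationPolicy = """
    [{"Type":"Expiration","Version":"1.0","Attributes":{"Timestamp":"2025-12-31T00:00:00.000Z"}}]
    """
    let result = try await ssm.putParameter(
        SSM.PutParameterRequest(
            name: "/example/app/api-key",
            overwrite: true,
            policies: expirationPolicy,
            tier: .advanced,          // parameter policies require the advanced tier
            type: .secureString,
            value: "example-value"
        )
    )
    return result.version
}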
diff --git a/Sources/Soto/Services/SSOOIDC/SSOOIDC_api.swift b/Sources/Soto/Services/SSOOIDC/SSOOIDC_api.swift index 71c67d149b..673d77b48b 100644 --- a/Sources/Soto/Services/SSOOIDC/SSOOIDC_api.swift +++ b/Sources/Soto/Services/SSOOIDC/SSOOIDC_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS SSOOIDC service. /// -/// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a client (such as CLI or a native application) to register with IAM Identity Center. The service also enables the client to fetch the user’s access token upon successful authentication and authorization with IAM Identity Center. IAM Identity Center uses the sso and identitystore API namespaces. Considerations for Using This Guide Before you begin using this guide, we recommend that you first review the following important information about how the IAM Identity Center OIDC service works. The IAM Identity Center OIDC service currently implements only the portions of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628) that are necessary to enable single sign-on authentication with the CLI. With older versions of the CLI, the service only emits OIDC access tokens, so to obtain a new token, users must explicitly re-authenticate. To access the OIDC flow that supports token refresh and doesn’t require re-authentication, update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with support for OIDC token refresh and configurable IAM Identity Center session durations. For more information, see Configure Amazon Web Services access portal session duration . The access tokens provided by this service grant access to all Amazon Web Services account entitlements assigned to an IAM Identity Center user, not just a particular application. The documentation in this guide does not describe the mechanism to convert the access token into Amazon Web Services Auth (“sigv4”) credentials for use with IAM-protected Amazon Web Services service endpoints. For more information, see GetRoleCredentials in the IAM Identity Center Portal API Reference Guide. For general information about IAM Identity Center, see What is IAM Identity Center? in the IAM Identity Center User Guide. +/// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a client (such as CLI or a native application) to register with IAM Identity Center. The service also enables the client to fetch the user’s access token upon successful authentication and authorization with IAM Identity Center. API namespaces IAM Identity Center uses the sso and identitystore API namespaces. IAM Identity Center OpenID Connect uses the sso-oidc namespace. Considerations for using this guide Before you begin using this guide, we recommend that you first review the following important information about how the IAM Identity Center OIDC service works. The IAM Identity Center OIDC service currently implements only the portions of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628) that are necessary to enable single sign-on authentication with the CLI. With older versions of the CLI, the service only emits OIDC access tokens, so to obtain a new token, users must explicitly re-authenticate. To access the OIDC flow that supports token refresh and doesn’t require re-authentication, update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with support for OIDC token refresh and configurable IAM Identity Center session durations. For more information, see Configure Amazon Web Services access portal session duration. The access tokens provided by this service grant access to all Amazon Web Services account entitlements assigned to an IAM Identity Center user, not just a particular application. The documentation in this guide does not describe the mechanism to convert the access token into Amazon Web Services Auth (“sigv4”) credentials for use with IAM-protected Amazon Web Services service endpoints. For more information, see GetRoleCredentials in the IAM Identity Center Portal API Reference Guide. For general information about IAM Identity Center, see What is IAM Identity Center? in the IAM Identity Center User Guide. public struct SSOOIDC: AWSService { // MARK: Member variables @@ -117,7 +117,7 @@ public struct SSOOIDC: AWSService { // MARK: API Calls - /// Creates and returns access and refresh tokens for clients that are authenticated using client secrets. The access token can be used to fetch short-term credentials for the assigned AWS accounts or to access application APIs using bearer authentication. + /// Creates and returns access and refresh tokens for clients that are authenticated using client secrets. The access token can be used to fetch short-lived credentials for the assigned AWS accounts or to access application APIs using bearer authentication. @Sendable @inlinable public func createToken(_ input: CreateTokenRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTokenResponse { @@ -130,17 +130,17 @@ logger: logger ) } - /// Creates and returns access and refresh tokens for clients that are authenticated using client secrets. The access token can be used to fetch short-term credentials for the assigned AWS accounts or to access application APIs using bearer authentication. + /// Creates and returns access and refresh tokens for clients that are authenticated using client secrets. The access token can be used to fetch short-lived credentials for the assigned AWS accounts or to access application APIs using bearer authentication. /// /// Parameters: /// - clientId: The unique identifier string for the client or application. This value comes from the result of the RegisterClient API. /// - clientSecret: A secret string generated for the client. This value should come from the persisted result of the RegisterClient API. - /// - code: Used only when calling this API for the Authorization Code grant type. The short-term code is used to identify this authorization request. This grant type is currently unsupported for the CreateToken API. + /// - code: Used only when calling this API for the Authorization Code grant type. The short-lived code is used to identify this authorization request. /// - codeVerifier: Used only when calling this API for the Authorization Code grant type.
This value is generated by the client and presented to validate the original code challenge value the client passed at authorization time. - /// - deviceCode: Used only when calling this API for the Device Code grant type. This short-term code is used to identify this authorization request. This comes from the result of the StartDeviceAuthorization API. - /// - grantType: Supports the following OAuth grant types: Device Code and Refresh Token. Specify either of the following values, depending on the grant type that you want: * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh Token - refresh_token For information about how to obtain the device code, see the StartDeviceAuthorization topic. + /// - deviceCode: Used only when calling this API for the Device Code grant type. This short-lived code is used to identify this authorization request. This comes from the result of the StartDeviceAuthorization API. + /// - grantType: Supports the following OAuth grant types: Authorization Code, Device Code, and Refresh Token. Specify one of the following values, depending on the grant type that you want: * Authorization Code - authorization_code * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh Token - refresh_token /// - redirectUri: Used only when calling this API for the Authorization Code grant type. This value specifies the location of the client or application that has registered to receive the authorization code. - /// - refreshToken: Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-term tokens, such as the access token, that might expire. For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference. + /// - refreshToken: Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-lived tokens, such as the access token, that might expire. For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference. /// - scope: The list of scopes for which authorization is requested. The access token that is issued is limited to the scopes that are granted. If this value is not specified, IAM Identity Center authorizes all scopes that are configured for the client during the call to RegisterClient. /// - logger: Logger use during operation @inlinable @@ -170,7 +170,7 @@ public struct SSOOIDC: AWSService { return try await self.createToken(input, logger: logger) } - /// Creates and returns access and refresh tokens for clients and applications that are authenticated using IAM entities. The access token can be used to fetch short-term credentials for the assigned Amazon Web Services accounts or to access application APIs using bearer authentication. + /// Creates and returns access and refresh tokens for clients and applications that are authenticated using IAM entities. The access token can be used to fetch short-lived credentials for the assigned Amazon Web Services accounts or to access application APIs using bearer authentication. 
@Sendable @inlinable public func createTokenWithIAM(_ input: CreateTokenWithIAMRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTokenWithIAMResponse { @@ -183,16 +183,16 @@ public struct SSOOIDC: AWSService { logger: logger ) } - /// Creates and returns access and refresh tokens for clients and applications that are authenticated using IAM entities. The access token can be used to fetch short-term credentials for the assigned Amazon Web Services accounts or to access application APIs using bearer authentication. + /// Creates and returns access and refresh tokens for clients and applications that are authenticated using IAM entities. The access token can be used to fetch short-lived credentials for the assigned Amazon Web Services accounts or to access application APIs using bearer authentication. /// /// Parameters: /// - assertion: Used only when calling this API for the JWT Bearer grant type. This value specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize a trusted token issuer, configure the JWT Bearer GrantOptions for the application. /// - clientId: The unique identifier string for the client or application. This value is an application ARN that has OAuth grants configured. - /// - code: Used only when calling this API for the Authorization Code grant type. This short-term code is used to identify this authorization request. The code is obtained through a redirect from IAM Identity Center to a redirect URI persisted in the Authorization Code GrantOptions for the application. + /// - code: Used only when calling this API for the Authorization Code grant type. This short-lived code is used to identify this authorization request. The code is obtained through a redirect from IAM Identity Center to a redirect URI persisted in the Authorization Code GrantOptions for the application. /// - codeVerifier: Used only when calling this API for the Authorization Code grant type. This value is generated by the client and presented to validate the original code challenge value the client passed at authorization time. /// - grantType: Supports the following OAuth grant types: Authorization Code, Refresh Token, JWT Bearer, and Token Exchange. Specify one of the following values, depending on the grant type that you want: * Authorization Code - authorization_code * Refresh Token - refresh_token * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange /// - redirectUri: Used only when calling this API for the Authorization Code grant type. This value specifies the location of the client or application that has registered to receive the authorization code. - /// - refreshToken: Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-term tokens, such as the access token, that might expire. For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference. + /// - refreshToken: Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-lived tokens, such as the access token, that might expire. For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference. 
/// - requestedTokenType: Used only when calling this API for the Token Exchange grant type. This value specifies the type of token that the requester can receive. The following values are supported: * Access Token - urn:ietf:params:oauth:token-type:access_token * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token /// - scope: The list of scopes for which authorization is requested. The access token that is issued is limited to the scopes that are granted. If the value is not specified, IAM Identity Center authorizes all scopes configured for the application, including the following default scopes: openid, aws, sts:identity_context. /// - subjectToken: Used only when calling this API for the Token Exchange grant type. This value specifies the subject of the exchange. The value of the subject token must be an access token issued by IAM Identity Center to a different client or application. The access token must have authorized scopes that indicate the requested application as a target audience. @@ -229,7 +229,7 @@ public struct SSOOIDC: AWSService { return try await self.createTokenWithIAM(input, logger: logger) } - /// Registers a client with IAM Identity Center. This allows clients to initiate device authorization. The output should be persisted for reuse through many authentication requests. + /// Registers a public client with IAM Identity Center. This allows clients to perform authorization using the authorization code grant with Proof Key for Code Exchange (PKCE) or the device code grant. @Sendable @inlinable public func registerClient(_ input: RegisterClientRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterClientResponse { @@ -242,13 +242,13 @@ public struct SSOOIDC: AWSService { logger: logger ) } - /// Registers a client with IAM Identity Center. This allows clients to initiate device authorization. The output should be persisted for reuse through many authentication requests. + /// Registers a public client with IAM Identity Center. This allows clients to perform authorization using the authorization code grant with Proof Key for Code Exchange (PKCE) or the device code grant. /// /// Parameters: /// - clientName: The friendly name of the client. /// - clientType: The type of client. The service supports only public as a client type. Anything other than public will be rejected by the service. /// - entitledApplicationArn: This IAM Identity Center application ARN is used to define administrator-managed configuration for public client access to resources. At authorization, the scopes, grants, and redirect URI available to this client will be restricted by this application resource. - /// - grantTypes: The list of OAuth 2.0 grant types that are defined by the client. This list is used to restrict the token granting flows available to the client. + /// - grantTypes: The list of OAuth 2.0 grant types that are defined by the client. This list is used to restrict the token granting flows available to the client. Supports the following OAuth 2.0 grant types: Authorization Code, Device Code, and Refresh Token. * Authorization Code - authorization_code * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh Token - refresh_token /// - issuerUrl: The IAM Identity Center Issuer URL associated with an instance of IAM Identity Center. This value is needed for user access to resources through the client. /// - redirectUris: The list of redirect URI that are defined by the client. 
At completion of authorization, this list is used to restrict what locations the user agent can be redirected back to. /// - scopes: The list of scopes that are defined by the client. Upon authorization, this list is used to restrict permissions when granting an access token.
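The grant-type strings documented above are easy to mistype, so here is a minimal sketch of the device code flow against these APIs. It is an illustration under stated assumptions: client is an existing AWSClient, the client name is invented, and deviceCode would come from a prior StartDeviceAuthorization call followed by user approval; clientId and clientSecret are optional in the generated response shape, so real code should handle their absence rather than defaulting to empty strings.

import SotoSSOOIDC

// Minimal sketch of the device code grant described above. The client name is
// illustrative; `deviceCode` comes from StartDeviceAuthorization plus user approval.
func exchangeDeviceCode(_ deviceCode: String, client: AWSClient) async throws -> SSOOIDC.CreateTokenResponse {
    let oidc = SSOOIDC(client: client, region: .useast1)
    // Register a public client restricted to the device code and refresh token grants.
    let registration = try await oidc.registerClient(
        SSOOIDC.RegisterClientRequest(
            clientName: "example-cli",
            clientType: "public",   // the only client type the service accepts
            grantTypes: ["urn:ietf:params:oauth:grant-type:device_code", "refresh_token"]
        )
    )
    // Exchange the device code for access and refresh tokens.
    return try await oidc.createToken(
        SSOOIDC.CreateTokenRequest(
            clientId: registration.clientId ?? "",
            clientSecret: registration.clientSecret ?? "",
            deviceCode: deviceCode,
            grantType: "urn:ietf:params:oauth:grant-type:device_code"
        )
    )
}

The refresh token returned by this call can later be sent back through the refreshToken parameter with grantType set to refresh_token, matching the Refresh Token grant listed in the hunks above.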
diff --git a/Sources/Soto/Services/SSOOIDC/SSOOIDC_shapes.swift b/Sources/Soto/Services/SSOOIDC/SSOOIDC_shapes.swift index d1671c984c..813dc6d9a5 100644 --- a/Sources/Soto/Services/SSOOIDC/SSOOIDC_shapes.swift +++ b/Sources/Soto/Services/SSOOIDC/SSOOIDC_shapes.swift @@ -33,17 +33,17 @@ extension SSOOIDC { public let clientId: String /// A secret string generated for the client. This value should come from the persisted result of the RegisterClient API. public let clientSecret: String - /// Used only when calling this API for the Authorization Code grant type. The short-term code is used to identify this authorization request. This grant type is currently unsupported for the CreateToken API. + /// Used only when calling this API for the Authorization Code grant type. The short-lived code is used to identify this authorization request. public let code: String? /// Used only when calling this API for the Authorization Code grant type. This value is generated by the client and presented to validate the original code challenge value the client passed at authorization time. public let codeVerifier: String? - /// Used only when calling this API for the Device Code grant type. This short-term code is used to identify this authorization request. This comes from the result of the StartDeviceAuthorization API. + /// Used only when calling this API for the Device Code grant type. This short-lived code is used to identify this authorization request. This comes from the result of the StartDeviceAuthorization API. public let deviceCode: String? - /// Supports the following OAuth grant types: Device Code and Refresh Token. Specify either of the following values, depending on the grant type that you want: * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh Token - refresh_token For information about how to obtain the device code, see the StartDeviceAuthorization topic. + /// Supports the following OAuth grant types: Authorization Code, Device Code, and Refresh Token. Specify one of the following values, depending on the grant type that you want: * Authorization Code - authorization_code * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh Token - refresh_token public let grantType: String /// Used only when calling this API for the Authorization Code grant type. This value specifies the location of the client or application that has registered to receive the authorization code. public let redirectUri: String? - /// Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-term tokens, such as the access token, that might expire. For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference. + /// Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-lived tokens, such as the access token, that might expire. For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference. public let refreshToken: String? /// The list of scopes for which authorization is requested. The access token that is issued is limited to the scopes that are granted. If this value is not specified, IAM Identity Center authorizes all scopes that are configured for the client during the call to RegisterClient. public let scope: [String]? @@ -109,7 +109,7 @@ extension SSOOIDC { public let assertion: String? /// The unique identifier string for the client or application. This value is an application ARN that has OAuth grants configured. public let clientId: String - /// Used only when calling this API for the Authorization Code grant type. This short-term code is used to identify this authorization request. The code is obtained through a redirect from IAM Identity Center to a redirect URI persisted in the Authorization Code GrantOptions for the application. + /// Used only when calling this API for the Authorization Code grant type. This short-lived code is used to identify this authorization request. The code is obtained through a redirect from IAM Identity Center to a redirect URI persisted in the Authorization Code GrantOptions for the application. public let code: String? /// Used only when calling this API for the Authorization Code grant type. This value is generated by the client and presented to validate the original code challenge value the client passed at authorization time. public let codeVerifier: String? @@ -117,7 +117,7 @@ extension SSOOIDC { public let grantType: String /// Used only when calling this API for the Authorization Code grant type. This value specifies the location of the client or application that has registered to receive the authorization code. public let redirectUri: String? - /// Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-term tokens, such as the access token, that might expire. For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference. + /// Used only when calling this API for the Refresh Token grant type. This token is used to refresh short-lived tokens, such as the access token, that might expire. For more information about the features and limitations of the current IAM Identity Center OIDC implementation, see Considerations for Using this Guide in the IAM Identity Center OIDC API Reference. public let refreshToken: String? /// Used only when calling this API for the Token Exchange grant type. This value specifies the type of token that the requester can receive. The following values are supported: * Access Token - urn:ietf:params:oauth:token-type:access_token * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token public let requestedTokenType: String? @@ -203,7 +203,7 @@ extension SSOOIDC { public let clientType: String /// This IAM Identity Center application ARN is used to define administrator-managed configuration for public client access to resources. At authorization, the scopes, grants, and redirect URI available to this client will be restricted by this application resource. public let entitledApplicationArn: String? - /// The list of OAuth 2.0 grant types that are defined by the client. This list is used to restrict the token granting flows available to the client. + /// The list of OAuth 2.0 grant types that are defined by the client. This list is used to restrict the token granting flows available to the client.
Supports the following OAuth 2.0 grant types: Authorization Code, Device Code, and Refresh Token. * Authorization Code - authorization_code * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh Token - refresh_token public let grantTypes: [String]? /// The IAM Identity Center Issuer URL associated with an instance of IAM Identity Center. This value is needed for user access to resources through the client. public let issuerUrl: String? diff --git a/Sources/Soto/Services/STS/STS_api.swift b/Sources/Soto/Services/STS/STS_api.swift index bdb45047f2..4c7bdad954 100644 --- a/Sources/Soto/Services/STS/STS_api.swift +++ b/Sources/Soto/Services/STS/STS_api.swift @@ -92,6 +92,7 @@ public struct STS: AWSService { "ap-southeast-3": "sts.ap-southeast-3.amazonaws.com", "ap-southeast-4": "sts.ap-southeast-4.amazonaws.com", "ap-southeast-5": "sts.ap-southeast-5.amazonaws.com", + "ap-southeast-7": "sts.ap-southeast-7.amazonaws.com", "aws-global": "sts.amazonaws.com", "ca-central-1": "sts.ca-central-1.amazonaws.com", "ca-west-1": "sts.ca-west-1.amazonaws.com", @@ -106,6 +107,7 @@ public struct STS: AWSService { "il-central-1": "sts.il-central-1.amazonaws.com", "me-central-1": "sts.me-central-1.amazonaws.com", "me-south-1": "sts.me-south-1.amazonaws.com", + "mx-central-1": "sts.mx-central-1.amazonaws.com", "sa-east-1": "sts.sa-east-1.amazonaws.com", "us-east-1": "sts.us-east-1.amazonaws.com", "us-east-2": "sts.us-east-2.amazonaws.com", @@ -156,7 +158,7 @@ public struct STS: AWSService { /// - roleArn: The Amazon Resource Name (ARN) of the role to assume. /// - roleSessionName: An identifier for the assumed role session. Use the role session name to uniquely identify a session when the same role is assumed by different principals or for different reasons. In cross-account scenarios, the role session name is visible to, and can be logged by the account that owns the role. The role session name is also used in the ARN of the assumed role principal. This means that subsequent cross-account API requests that use the temporary security credentials will expose the role session name to the external account in their CloudTrail logs. For security purposes, administrators can view this field in CloudTrail logs to help identify who performed an action in Amazon Web Services. Your administrator might require that you specify your user name as the session name when you assume the role. For more information, see sts:RoleSessionName . The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@- /// - serialNumber: The identification number of the MFA device that is associated with the user who is making the AssumeRole call. Specify this value if the trust policy of the role being assumed includes a condition that requires MFA authentication. The value is either the serial number for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@- - /// - sourceIdentity: The source identity specified by the principal that is calling the AssumeRole operation. The source identity value persists across chained role sessions. 
You can require users to specify a source identity when they assume a role. You do this by using the sts:SourceIdentity condition key in a role trust policy. You can use source identity information in CloudTrail logs to determine who took actions with a role. You can use the aws:SourceIdentity condition key to further control access to Amazon Web Services resources based on the value of source identity. For more information about using source identity, see Monitor and control actions taken with assumed roles in the IAM User Guide. The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@-. You cannot use a value that begins with the text aws:. This prefix is reserved for Amazon Web Services internal use. + /// - sourceIdentity: The source identity specified by the principal that is calling the AssumeRole operation. The source identity value persists across chained role sessions. You can require users to specify a source identity when they assume a role. You do this by using the sts:SourceIdentity condition key in a role trust policy. You can use source identity information in CloudTrail logs to determine who took actions with a role. You can use the aws:SourceIdentity condition key to further control access to Amazon Web Services resources based on the value of source identity. For more information about using source identity, see Monitor and control actions taken with assumed roles in the IAM User Guide. The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: +=,.@-. You cannot use a value that begins with the text aws:. This prefix is reserved for Amazon Web Services internal use. /// - tags: A list of session tags that you want to pass. Each session tag consists of a key name and an associated value. For more information about session tags, see Tagging Amazon Web Services STS Sessions in the IAM User Guide. This parameter is optional. You can pass up to 50 session tags. The plaintext session tag keys can’t exceed 128 characters, and the values can’t exceed 256 characters. For these and additional limits, see IAM and STS Character Limits in the IAM User Guide. An Amazon Web Services conversion compresses the passed inline session policy, managed policy ARNs, and session tags into a packed binary format that has a separate limit. Your request can fail for this limit even if your plaintext meets the other requirements. The PackedPolicySize response element indicates by percentage how close the policies and tags for your request are to the upper size limit. You can pass a session tag with the same key as a tag that is already attached to the role. When you do, session tags override a role tag with the same key. Tag key–value pairs are not case sensitive, but case is preserved. This means that you cannot have separate Department and department tag keys. Assume that the role has the Department=Marketing tag and you pass the department=engineering session tag. Department and department are not saved as separate tags, and the session tag passed in the request takes precedence over the role tag. Additionally, if you used temporary credentials to perform this operation, the new session inherits any transitive session tags from the calling session. 
If you pass a session tag with the same key as an inherited tag, the operation fails. To view the inherited tags for a session, see the CloudTrail logs. For more information, see Viewing Session Tags in CloudTrail in the IAM User Guide. /// - tokenCode: The value provided by the MFA device, if the trust policy of the role being assumed requires MFA. (In other words, if the policy includes a condition that tests for MFA). If the role being assumed requires MFA and if the TokenCode value is missing or expired, the AssumeRole call returns an "access denied" error. The format for this parameter, as described by its regex pattern, is a sequence of six numeric digits. /// - transitiveTagKeys: A list of keys for session tags that you want to set as transitive. If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. For more information, see Chaining Roles with Session Tags in the IAM User Guide. This parameter is optional. The transitive status of a session tag does not impact its packed binary size. If you choose not to specify a transitive tag key, then no tags are passed from this session to any subsequent sessions. @@ -260,7 +262,7 @@ public struct STS: AWSService { /// - providerId: The fully qualified host component of the domain name of the OAuth 2.0 identity provider. Do not specify this value for an OpenID Connect identity provider. Currently www.amazon.com and graph.facebook.com are the only supported identity providers for OAuth 2.0 access tokens. Do not include URL schemes and port numbers. Do not specify this value for OpenID Connect ID tokens. /// - roleArn: The Amazon Resource Name (ARN) of the role that the caller is assuming. Additional considerations apply to Amazon Cognito identity pools that assume cross-account IAM roles. The trust policies of these roles must accept the cognito-identity.amazonaws.com service principal and must contain the cognito-identity.amazonaws.com:aud condition key to restrict role assumption to users from your intended identity pools. A policy that trusts Amazon Cognito identity pools without this condition creates a risk that a user from an unintended identity pool can assume the role. For more information, see Trust policies for IAM roles in Basic (Classic) authentication in the Amazon Cognito Developer Guide. /// - roleSessionName: An identifier for the assumed role session. Typically, you pass the name or identifier that is associated with the user who is using your application. That way, the temporary security credentials that your application will use are associated with that user. This session name is included as part of the ARN and assumed role ID in the AssumedRoleUser response element. For security purposes, administrators can view this field in CloudTrail logs to help identify who performed an action in Amazon Web Services. Your administrator might require that you specify your user name as the session name when you assume the role. For more information, see sts:RoleSessionName . The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@- - /// - webIdentityToken: The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. 
Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call. Timestamps in the token must be formatted as either an integer or a long integer. Only tokens with RSA algorithms (RS256) are supported. + /// - webIdentityToken: The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call. Timestamps in the token must be formatted as either an integer or a long integer. Tokens must be signed using either RSA keys (RS256, RS384, or RS512) or ECDSA keys (ES256, ES384, or ES512). /// - logger: Logger use during operation @inlinable public func assumeRoleWithWebIdentity( @@ -285,7 +287,7 @@ public struct STS: AWSService { return try await self.assumeRoleWithWebIdentity(input, logger: logger) } - /// Returns a set of short term credentials you can use to perform privileged tasks in a member account. Before you can launch a privileged session, you must have enabled centralized root access in your organization. For steps to enable this feature, see Centralize root access for member accounts in the IAM User Guide. The global endpoint is not supported for AssumeRoot. You must send this request to a Regional STS endpoint. For more information, see Endpoints. You can track AssumeRoot in CloudTrail logs to determine what actions were performed in a session. For more information, see Track privileged tasks in CloudTrail in the IAM User Guide. + /// Returns a set of short term credentials you can use to perform privileged tasks on a member account in your organization. Before you can launch a privileged session, you must have centralized root access in your organization. For steps to enable this feature, see Centralize root access for member accounts in the IAM User Guide. The STS global endpoint is not supported for AssumeRoot. You must send this request to a Regional STS endpoint. For more information, see Endpoints. You can track AssumeRoot in CloudTrail logs to determine what actions were performed in a session. For more information, see Track privileged tasks in CloudTrail in the IAM User Guide. @Sendable @inlinable public func assumeRoot(_ input: AssumeRootRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssumeRootResponse { @@ -298,12 +300,12 @@ public struct STS: AWSService { logger: logger ) } - /// Returns a set of short term credentials you can use to perform privileged tasks in a member account. Before you can launch a privileged session, you must have enabled centralized root access in your organization. For steps to enable this feature, see Centralize root access for member accounts in the IAM User Guide. The global endpoint is not supported for AssumeRoot. You must send this request to a Regional STS endpoint. For more information, see Endpoints. You can track AssumeRoot in CloudTrail logs to determine what actions were performed in a session. For more information, see Track privileged tasks in CloudTrail in the IAM User Guide. + /// Returns a set of short term credentials you can use to perform privileged tasks on a member account in your organization. Before you can launch a privileged session, you must have centralized root access in your organization. 
For steps to enable this feature, see Centralize root access for member accounts in the IAM User Guide. The STS global endpoint is not supported for AssumeRoot. You must send this request to a Regional STS endpoint. For more information, see Endpoints. You can track AssumeRoot in CloudTrail logs to determine what actions were performed in a session. For more information, see Track privileged tasks in CloudTrail in the IAM User Guide. /// /// Parameters: /// - durationSeconds: The duration, in seconds, of the privileged session. The value can range from 0 seconds up to the maximum session duration of 900 seconds (15 minutes). If you specify a value higher than this setting, the operation fails. By default, the value is set to 900 seconds. /// - targetPrincipal: The member account principal ARN or account ID. - /// - taskPolicyArn: The identity based policy that scopes the session to the privileged tasks that can be performed. You can use one of following Amazon Web Services managed policies to scope root session actions. You can add additional customer managed policies to further limit the permissions for the root session. IAMAuditRootUserCredentials IAMCreateRootUserPassword IAMDeleteRootUserCredentials S3UnlockBucketPolicy SQSUnlockQueuePolicy + /// - taskPolicyArn: The identity based policy that scopes the session to the privileged tasks that can be performed. You can use one of the following Amazon Web Services managed policies to scope root session actions. IAMAuditRootUserCredentials IAMCreateRootUserPassword IAMDeleteRootUserCredentials S3UnlockBucketPolicy SQSUnlockQueuePolicy /// - logger: Logger use during operation @inlinable public func assumeRoot(
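To make the AssumeRoot parameters concrete, here is a hypothetical sketch. The target account ID is illustrative, and the root-task path in the managed policy ARN is an assumption inferred from the policy names listed above, not something this diff states; client is an existing AWSClient.

import SotoSTS

// Minimal sketch: start a privileged root session scoped to unlocking a bucket policy.
// The target account ID and the managed policy ARN path are illustrative assumptions.
func assumeRootForBucketUnlock(client: AWSClient) async throws -> STS.Credentials? {
    let sts = STS(client: client, region: .useast1)
    let session = try await sts.assumeRoot(
        STS.AssumeRootRequest(
            durationSeconds: 900,   // the maximum (and default) session duration
            targetPrincipal: "111122223333",
            taskPolicyArn: STS.PolicyDescriptorType(arn: "arn:aws:iam::aws:policy/root-task/S3UnlockBucketPolicy")
        )
    )
    // The returned credentials are valid only for the task scoped by the policy above.
    return session.credentials
}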
diff --git a/Sources/Soto/Services/STS/STS_shapes.swift b/Sources/Soto/Services/STS/STS_shapes.swift index 99175ff64c..3e064b0c1e 100644 --- a/Sources/Soto/Services/STS/STS_shapes.swift +++ b/Sources/Soto/Services/STS/STS_shapes.swift @@ -47,7 +47,7 @@ extension STS { public let roleSessionName: String /// The identification number of the MFA device that is associated with the user who is making the AssumeRole call. Specify this value if the trust policy of the role being assumed includes a condition that requires MFA authentication. The value is either the serial number for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@- public let serialNumber: String? - /// The source identity specified by the principal that is calling the AssumeRole operation. The source identity value persists across chained role sessions. You can require users to specify a source identity when they assume a role. You do this by using the sts:SourceIdentity condition key in a role trust policy. You can use source identity information in CloudTrail logs to determine who took actions with a role. You can use the aws:SourceIdentity condition key to further control access to Amazon Web Services resources based on the value of source identity. For more information about using source identity, see Monitor and control actions taken with assumed roles in the IAM User Guide. The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@-. You cannot use a value that begins with the text aws:. This prefix is reserved for Amazon Web Services internal use. + /// The source identity specified by the principal that is calling the AssumeRole operation. The source identity value persists across chained role sessions. You can require users to specify a source identity when they assume a role. You do this by using the sts:SourceIdentity condition key in a role trust policy. You can use source identity information in CloudTrail logs to determine who took actions with a role. You can use the aws:SourceIdentity condition key to further control access to Amazon Web Services resources based on the value of source identity. For more information about using source identity, see Monitor and control actions taken with assumed roles in the IAM User Guide. The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: +=,.@-. You cannot use a value that begins with the text aws:. This prefix is reserved for Amazon Web Services internal use. public let sourceIdentity: String? /// A list of session tags that you want to pass. Each session tag consists of a key name and an associated value. For more information about session tags, see Tagging Amazon Web Services STS Sessions in the IAM User Guide. This parameter is optional. You can pass up to 50 session tags. The plaintext session tag keys can’t exceed 128 characters, and the values can’t exceed 256 characters. For these and additional limits, see IAM and STS Character Limits in the IAM User Guide. An Amazon Web Services conversion compresses the passed inline session policy, managed policy ARNs, and session tags into a packed binary format that has a separate limit. Your request can fail for this limit even if your plaintext meets the other requirements. The PackedPolicySize response element indicates by percentage how close the policies and tags for your request are to the upper size limit. You can pass a session tag with the same key as a tag that is already attached to the role. When you do, session tags override a role tag with the same key. Tag key–value pairs are not case sensitive, but case is preserved. This means that you cannot have separate Department and department tag keys. Assume that the role has the Department=Marketing tag and you pass the department=engineering session tag. Department and department are not saved as separate tags, and the session tag passed in the request takes precedence over the role tag. Additionally, if you used temporary credentials to perform this operation, the new session inherits any transitive session tags from the calling session. If you pass a session tag with the same key as an inherited tag, the operation fails. To view the inherited tags for a session, see the CloudTrail logs. For more information, see Viewing Session Tags in CloudTrail in the IAM User Guide. @OptionalCustomCoding> @@ -272,7 +272,7 @@ extension STS { public let roleArn: String /// An identifier for the assumed role session. Typically, you pass the name or identifier that is associated with the user who is using your application. That way, the temporary security credentials that your application will use are associated with that user. This session name is included as part of the ARN and assumed role ID in the AssumedRoleUser response element.
For security purposes, administrators can view this field in CloudTrail logs to help identify who performed an action in Amazon Web Services. Your administrator might require that you specify your user name as the session name when you assume the role. For more information, see sts:RoleSessionName . The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@- public let roleSessionName: String - /// The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call. Timestamps in the token must be formatted as either an integer or a long integer. Only tokens with RSA algorithms (RS256) are supported. + /// The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call. Timestamps in the token must be formatted as either an integer or a long integer. Tokens must be signed using either RSA keys (RS256, RS384, or RS512) or ECDSA keys (ES256, ES384, or ES512). public let webIdentityToken: String @inlinable @@ -361,7 +361,7 @@ extension STS { public let durationSeconds: Int? /// The member account principal ARN or account ID. public let targetPrincipal: String - /// The identity based policy that scopes the session to the privileged tasks that can be performed. You can use one of following Amazon Web Services managed policies to scope root session actions. You can add additional customer managed policies to further limit the permissions for the root session. IAMAuditRootUserCredentials IAMCreateRootUserPassword IAMDeleteRootUserCredentials S3UnlockBucketPolicy SQSUnlockQueuePolicy + /// The identity based policy that scopes the session to the privileged tasks that can be performed. You can use one of the following Amazon Web Services managed policies to scope root session actions. IAMAuditRootUserCredentials IAMCreateRootUserPassword IAMDeleteRootUserCredentials S3UnlockBucketPolicy SQSUnlockQueuePolicy public let taskPolicyArn: PolicyDescriptorType @inlinable diff --git a/Sources/Soto/Services/SageMaker/SageMaker_api.swift b/Sources/Soto/Services/SageMaker/SageMaker_api.swift index 6c582b114e..61805acd0a 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_api.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_api.swift @@ -376,7 +376,7 @@ public struct SageMaker: AWSService { return try await self.createAlgorithm(input, logger: logger) } - /// Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously. + /// Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker AI upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously.
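Illustrative sketch (not part of the patch): a minimal Soto call to the createApp operation documented above, assuming an AWSClient named client is configured and shut down elsewhere; the domain ID, user profile name, and instance type are hypothetical placeholders.

import SotoSageMaker

// Assumed setup: `client` is an AWSClient created elsewhere in the application.
let sageMaker = SageMaker(client: client, region: .useast1)

// The instance type given here overrides any ResourceSpec configured on the
// user profile or the domain, per the resourceSpec parameter docs below.
let app = try await sageMaker.createApp(
    appName: "default",
    appType: .kernelGateway,
    domainId: "d-examplexxxxx",                      // hypothetical domain ID
    resourceSpec: .init(instanceType: .mlT3Medium),  // assumed instance type
    userProfileName: "example-user"                  // or set spaceName instead
)
print(app.appArn ?? "")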
@Sendable @inlinable public func createApp(_ input: CreateAppRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAppResponse { @@ -389,13 +389,13 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously. + /// Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker AI upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously. /// /// Parameters: /// - appName: The name of the app. /// - appType: The type of app. /// - domainId: The domain ID. - /// - resourceSpec: The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error. + /// - resourceSpec: The instance type and the Amazon Resource Name (ARN) of the SageMaker AI image created on the instance. The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error. /// - spaceName: The name of the space. If this value is not set, then UserProfileName must be set. /// - tags: Each tag consists of a key and an optional value. Tag keys must be unique per resource. /// - userProfileName: The user profile name. If this value is not set, then SpaceName must be set. @@ -423,7 +423,7 @@ public struct SageMaker: AWSService { return try await self.createApp(input, logger: logger) } - /// Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the kernels in the image. + /// Creates a configuration for running a SageMaker AI image as a KernelGateway app. The configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the kernels in the image. @Sendable @inlinable public func createAppImageConfig(_ input: CreateAppImageConfigRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAppImageConfigResponse { @@ -436,7 +436,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the kernels in the image. + /// Creates a configuration for running a SageMaker AI image as a KernelGateway app. The configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the kernels in the image. /// /// Parameters: /// - appImageConfigName: The name of the AppImageConfig. Must be unique to your account. 
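Illustrative sketch (not part of the patch): a companion call to the createAppImageConfig operation documented above. The config name, mount path, UID/GID, and kernel name are assumptions; the shape initializers follow the usual Soto pattern but are not shown in this diff.

import SotoSageMaker

// Reusing the `sageMaker` service object from the previous sketch.
// The EFS mount settings and kernel list correspond to the "storage volume"
// and "kernels" that the createAppImageConfig doc text mentions.
let imageConfig = try await sageMaker.createAppImageConfig(
    appImageConfigName: "example-kernel-config",
    kernelGatewayImageConfig: .init(
        fileSystemConfig: .init(defaultGid: 100, defaultUid: 1000, mountPath: "/home/sagemaker-user"),
        kernelSpecs: [.init(displayName: "Python 3", name: "python3")]
    )
)
print(imageConfig.appImageConfigArn ?? "")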
@@ -508,7 +508,7 @@ public struct SageMaker: AWSService { return try await self.createArtifact(input, logger: logger) } - /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. + /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. An AutoML job in SageMaker AI is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker AI then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker AI developer guide. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. 
You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. @Sendable @inlinable public func createAutoMLJob(_ input: CreateAutoMLJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAutoMLJobResponse { @@ -521,7 +521,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. + /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. An AutoML job in SageMaker AI is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker AI then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker AI developer guide. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. 
CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. /// /// Parameters: /// - autoMLJobConfig: A collection of settings used to configure an AutoML job. @@ -564,7 +564,7 @@ public struct SageMaker: AWSService { return try await self.createAutoMLJob(input, logger: logger) } - /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide. AutoML jobs V2 support various problem types such as regression, binary, and multiclass classification with tabular data, text and image classification, time-series forecasting, and fine-tuning of large language models (LLMs) for text generation. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. + /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. An AutoML job in SageMaker AI is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker AI then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. 
AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker AI developer guide. AutoML jobs V2 support various problem types such as regression, binary, and multiclass classification with tabular data, text and image classification, time-series forecasting, and fine-tuning of large language models (LLMs) for text generation. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. @Sendable @inlinable public func createAutoMLJobV2(_ input: CreateAutoMLJobV2Request, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAutoMLJobV2Response { @@ -577,7 +577,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide. AutoML jobs V2 support various problem types such as regression, binary, and multiclass classification with tabular data, text and image classification, time-series forecasting, and fine-tuning of large language models (LLMs) for text generation. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. 
CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. + /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. An AutoML job in SageMaker AI is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker AI then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker AI developer guide. AutoML jobs V2 support various problem types such as regression, binary, and multiclass classification with tabular data, text and image classification, time-series forecasting, and fine-tuning of large language models (LLMs) for text generation. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. /// /// Parameters: /// - autoMLComputeConfig: Specifies the compute configuration for the AutoML job V2. @@ -708,7 +708,7 @@ public struct SageMaker: AWSService { return try await self.createClusterSchedulerConfig(input, logger: logger) } - /// Creates a Git repository as a resource in your SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with. The repository can be hosted either in Amazon Web Services CodeCommit or in any other Git repository. + /// Creates a Git repository as a resource in your SageMaker AI account. 
You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your SageMaker AI account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with. The repository can be hosted either in Amazon Web Services CodeCommit or in any other Git repository. @Sendable @inlinable public func createCodeRepository(_ input: CreateCodeRepositoryInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCodeRepositoryOutput { @@ -721,7 +721,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a Git repository as a resource in your SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with. The repository can be hosted either in Amazon Web Services CodeCommit or in any other Git repository. + /// Creates a Git repository as a resource in your SageMaker AI account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your SageMaker AI account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with. The repository can be hosted either in Amazon Web Services CodeCommit or in any other Git repository. /// /// Parameters: /// - codeRepositoryName: The name of the Git repository. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen). @@ -743,7 +743,7 @@ public struct SageMaker: AWSService { return try await self.createCodeRepository(input, logger: logger) } - /// Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify. If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource. In the request body, you provide the following: A name for the compilation job Information about the input model artifacts The output location for the compiled model and the device (target) that the model runs on The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation job. You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job. To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs. + /// Starts a model compilation job. After the model has been compiled, Amazon SageMaker AI saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify. 
If you choose to host your model using Amazon SageMaker AI hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource. In the request body, you provide the following: A name for the compilation job Information about the input model artifacts The output location for the compiled model and the device (target) that the model runs on The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker AI assumes to perform the model compilation job. You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job. To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs. @Sendable @inlinable public func createCompilationJob(_ input: CreateCompilationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCompilationJobResponse { @@ -756,15 +756,15 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify. If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource. In the request body, you provide the following: A name for the compilation job Information about the input model artifacts The output location for the compiled model and the device (target) that the model runs on The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation job. You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job. To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs. + /// Starts a model compilation job. After the model has been compiled, Amazon SageMaker AI saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify. If you choose to host your model using Amazon SageMaker AI hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource. In the request body, you provide the following: A name for the compilation job Information about the input model artifacts The output location for the compiled model and the device (target) that the model runs on The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker AI assumes to perform the model compilation job. You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job. To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. 
To get information about multiple model compilation jobs, use ListCompilationJobs. /// /// Parameters: /// - compilationJobName: A name for the model compilation job. The name must be unique within the Amazon Web Services Region and within your Amazon Web Services account. /// - inputConfig: Provides information about the location of input model artifacts, the name and shape of the expected data inputs, and the framework in which the model was trained. /// - modelPackageVersionArn: The Amazon Resource Name (ARN) of a versioned model package. Provide either a ModelPackageVersionArn or an InputConfig object in the request syntax. The presence of both objects in the CreateCompilationJob request will return an exception. /// - outputConfig: Provides information about the output location for the compiled model and the target device the model runs on. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. During model compilation, Amazon SageMaker needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles. - /// - stoppingCondition: Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs. + /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. During model compilation, Amazon SageMaker AI needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker AI Roles. + /// - stoppingCondition: Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker AI ends the compilation job. Use this API to cap model training costs. /// - tags: An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources. /// - vpcConfig: A VpcConfig object that specifies the VPC that you want your compilation job to connect to. Control access to your models by configuring the VPC. For more information, see Protect Compilation Jobs by Using an Amazon Virtual Private Cloud. /// - logger: Logger use during operation @@ -884,7 +884,7 @@ public struct SageMaker: AWSService { return try await self.createContext(input, logger: logger) } - /// Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor. + /// Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker AI Model Monitor. 
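Illustrative sketch (not part of the patch): a hedged example of the createCompilationJob call whose parameters are documented above. The bucket names, role ARN, input data shape, framework, and target device are assumptions, not values taken from this diff.

import SotoSageMaker

// Reusing `sageMaker` from the earlier sketches. The role must allow the S3
// reads/writes and CloudWatch access listed in the roleArn docs above.
let compilation = try await sageMaker.createCompilationJob(
    compilationJobName: "example-compilation-job",
    inputConfig: .init(
        dataInputConfig: #"{"data": [1, 3, 224, 224]}"#,  // expected input name and shape
        framework: .pytorch,
        s3Uri: "s3://example-bucket/model.tar.gz"
    ),
    outputConfig: .init(
        s3OutputLocation: "s3://example-bucket/compiled/",
        targetDevice: .mlC5
    ),
    roleArn: "arn:aws:iam::111122223333:role/ExampleSageMakerRole",
    stoppingCondition: .init(maxRuntimeInSeconds: 900)    // the job is ended at this limit
)
print(compilation.compilationJobArn)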
@Sendable @inlinable public func createDataQualityJobDefinition(_ input: CreateDataQualityJobDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataQualityJobDefinitionResponse { @@ -897,7 +897,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor. + /// Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker AI Model Monitor. /// /// Parameters: /// - dataQualityAppSpecification: Specifies the container that runs the monitoring job. @@ -907,7 +907,7 @@ public struct SageMaker: AWSService { /// - jobDefinitionName: The name for the monitoring job definition. /// - jobResources: /// - networkConfig: Specifies networking configuration for the monitoring job. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. /// - stoppingCondition: /// - tags: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. /// - logger: Logger use during operation @@ -984,7 +984,7 @@ public struct SageMaker: AWSService { return try await self.createDeviceFleet(input, logger: logger) } - /// Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other. EFS storage When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files. SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption. VPC configuration All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available: PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This is the default value. VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway. When internet access is disabled, you won't be able to run a Amazon SageMaker Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your security groups allow outbound connections. NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker Studio app successfully. For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC. 
+ /// Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other. EFS storage When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files. SageMaker AI uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption. VPC configuration All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available: PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker AI, which allows internet access. This is the default value. VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway. When internet access is disabled, you won't be able to run an Amazon SageMaker AI Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker AI API and runtime or a NAT gateway and your security groups allow outbound connections. NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch an Amazon SageMaker AI Studio app successfully. For more information, see Connect Amazon SageMaker AI Studio Notebooks to Resources in a VPC. @Sendable @inlinable public func createDomain(_ input: CreateDomainRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDomainResponse { @@ -997,17 +997,17 @@ logger: logger ) } - /// Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other. EFS storage When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files. SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption. VPC configuration All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available: PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access.
This is the default value. VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway. When internet access is disabled, you won't be able to run a Amazon SageMaker Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your security groups allow outbound connections. NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker Studio app successfully. For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC. + /// Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other. EFS storage When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files. SageMaker AI uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption. VPC configuration All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available: PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker AI, which allows internet access. This is the default value. VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway. When internet access is disabled, you won't be able to run an Amazon SageMaker AI Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker AI API and runtime or a NAT gateway and your security groups allow outbound connections. NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch an Amazon SageMaker AI Studio app successfully. For more information, see Connect Amazon SageMaker AI Studio Notebooks to Resources in a VPC. /// /// Parameters: - /// - appNetworkAccessType: Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets + /// - appNetworkAccessType: Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets /// - appSecurityGroupManagement: The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode.
Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. If setting up the domain for use with RStudio, this value must be set to Service. /// - authMode: The mode of authentication that members use to access the domain. /// - defaultSpaceSettings: The default settings for shared spaces that users create in the domain. /// - defaultUserSettings: The default settings to use to create a user profile when UserSettings isn't specified in the call to the CreateUserProfile API. SecurityGroups is aggregated when specified in both calls. For all other settings in UserSettings, the values specified in CreateUserProfile take precedence over those specified in CreateDomain. /// - domainName: A name for the domain. /// - domainSettings: A collection of Domain settings. - /// - kmsKeyId: SageMaker uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to the domain with an Amazon Web Services managed key by default. For more control, specify a customer managed key. + /// - kmsKeyId: SageMaker AI uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to the domain with an Amazon Web Services managed key by default. For more control, specify a customer managed key. /// - subnetIds: The VPC subnets that the domain uses for communication. /// - tagPropagation: Indicates whether custom tag propagation is supported for the domain. Defaults to DISABLED. /// - tags: Tags to associated with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API. Tags that you specify for the Domain are also added to all Apps that the Domain launches. @@ -1227,7 +1227,7 @@ public struct SageMaker: AWSService { /// - dataCaptureConfig: /// - enableNetworkIsolation: Sets whether all model containers deployed to the endpoint are isolated. If they are, no inbound or outbound network calls can be made to or from the model containers. /// - endpointConfigName: The name of the endpoint configuration. You specify this name in a CreateEndpoint request. - /// - executionRoleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform actions on your behalf. For more information, see SageMaker Roles. To be able to pass this role to Amazon SageMaker, the caller of this action must have the iam:PassRole permission. + /// - executionRoleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform actions on your behalf. For more information, see SageMaker AI Roles. To be able to pass this role to Amazon SageMaker AI, the caller of this action must have the iam:PassRole permission. /// - explainerConfig: A member of CreateEndpointConfig that enables explainers. /// - kmsKeyId: The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: alias/ExampleAlias Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint, UpdateEndpoint requests. 
For more information, refer to the Amazon Web Services Key Management Service section Using Key Policies in Amazon Web Services KMS Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any nitro-based instances with local storage, the call to CreateEndpointConfig fails. For a list of instance types that support local instance storage, see Instance Store Volumes. For more information about local instance storage encryption, see SSD Instance Store Volumes. /// - productionVariants: An array of ProductionVariant objects, one for each model that you want to host at this endpoint. @@ -1574,7 +1574,7 @@ public struct SageMaker: AWSService { return try await self.createHyperParameterTuningJob(input, logger: logger) } - /// Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker image. + /// Creates a custom SageMaker AI image. A SageMaker AI image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker AI image. @Sendable @inlinable public func createImage(_ input: CreateImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateImageResponse { @@ -1587,13 +1587,13 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker image. + /// Creates a custom SageMaker AI image. A SageMaker AI image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker AI image. /// /// Parameters: /// - description: The description of the image. /// - displayName: The display name of the image. If not provided, ImageName is displayed. /// - imageName: The name of the image. Must be unique to your account. - /// - roleArn: The ARN of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. + /// - roleArn: The ARN of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. /// - tags: A list of tags to apply to the image. /// - logger: Logger use during operation @inlinable @@ -1615,7 +1615,7 @@ public struct SageMaker: AWSService { return try await self.createImage(input, logger: logger) } - /// Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage. + /// Creates a version of the SageMaker AI image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage. @Sendable @inlinable public func createImageVersion(_ input: CreateImageVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateImageVersionResponse { @@ -1628,7 +1628,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a version of the SageMaker image specified by ImageName. 
The version represents the Amazon ECR container image specified by BaseImage. + /// Creates a version of the SageMaker AI image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage. /// /// Parameters: /// - aliases: A list of aliases created with the image version. @@ -1636,7 +1636,7 @@ public struct SageMaker: AWSService { /// - clientToken: A unique ID. If not specified, the Amazon Web Services CLI and Amazon Web Services SDKs, such as the SDK for Python (Boto3), add a unique value to the call. /// - horovod: Indicates Horovod compatibility. /// - imageName: The ImageName of the Image to create a version of. - /// - jobType: Indicates SageMaker job type compatibility. TRAINING: The image version is compatible with SageMaker training jobs. INFERENCE: The image version is compatible with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels. + /// - jobType: Indicates SageMaker AI job type compatibility. TRAINING: The image version is compatible with SageMaker AI training jobs. INFERENCE: The image version is compatible with SageMaker AI inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels. /// - mlFramework: The machine learning framework vended in the image version. /// - processor: Indicates CPU or GPU compatibility. CPU: The image version is compatible with CPU. GPU: The image version is compatible with GPU. /// - programmingLang: The supported programming language and its version. @@ -1674,7 +1674,7 @@ public struct SageMaker: AWSService { return try await self.createImageVersion(input, logger: logger) } - /// Creates an inference component, which is a SageMaker hosting object that you can use to deploy a model to an endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action. + /// Creates an inference component, which is a SageMaker AI hosting object that you can use to deploy a model to an endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action. @Sendable @inlinable public func createInferenceComponent(_ input: CreateInferenceComponentInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateInferenceComponentOutput { @@ -1687,7 +1687,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates an inference component, which is a SageMaker hosting object that you can use to deploy a model to an endpoint. 
In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action. + /// Creates an inference component, which is a SageMaker AI hosting object that you can use to deploy a model to an endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action. /// /// Parameters: /// - endpointName: The name of an existing endpoint where you host the inference component. @@ -2006,7 +2006,7 @@ /// - modelBiasJobInput: Inputs for the model bias job. /// - modelBiasJobOutputConfig: /// - networkConfig: Networking options for a model bias job. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. /// - stoppingCondition: /// - tags: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. /// - logger: Logger use during operation @@ -2141,7 +2141,7 @@ /// - modelExplainabilityJobInput: Inputs for the model explainability job. /// - modelExplainabilityJobOutputConfig: /// - networkConfig: Networking options for a model explainability job. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. /// - stoppingCondition: /// - tags: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. /// - logger: Logger use during operation @@ -2304,7 +2304,7 @@ public struct SageMaker: AWSService { return try await self.createModelPackageGroup(input, logger: logger) } - /// Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor. + /// Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker AI Model Monitor.
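Illustrative sketch (not part of the patch): pairing the createImage and createImageVersion operations documented a little earlier in this hunk. The image name, role ARN, and ECR image URI are hypothetical; only the parameter names come from the docs above.

import SotoSageMaker

// Reusing `sageMaker` from the earlier sketches. The role lets SageMaker AI
// perform tasks on your behalf, per the createImage roleArn docs above.
let image = try await sageMaker.createImage(
    imageName: "example-custom-image",
    roleArn: "arn:aws:iam::111122223333:role/ExampleSageMakerRole"
)

// Each image version wraps a container image stored in Amazon ECR (BaseImage).
let imageVersion = try await sageMaker.createImageVersion(
    baseImage: "111122223333.dkr.ecr.us-east-1.amazonaws.com/example-image:latest",
    imageName: "example-custom-image",
    jobType: .notebookKernel  // NOTEBOOK_KERNEL compatibility, per the jobType docs above
)
print(image.imageArn ?? "", imageVersion.imageVersionArn ?? "")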
@Sendable @inlinable public func createModelQualityJobDefinition(_ input: CreateModelQualityJobDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateModelQualityJobDefinitionResponse { @@ -2317,7 +2317,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor. + /// Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker AI Model Monitor. /// /// Parameters: /// - jobDefinitionName: The name of the monitoring job definition. @@ -2327,7 +2327,7 @@ public struct SageMaker: AWSService { /// - modelQualityJobInput: A list of the inputs that are monitored. Currently endpoints are supported. /// - modelQualityJobOutputConfig: /// - networkConfig: Specifies the network configuration for the monitoring job. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. /// - stoppingCondition: /// - tags: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. /// - logger: Logger use during operation @@ -2360,7 +2360,7 @@ public struct SageMaker: AWSService { return try await self.createModelQualityJobDefinition(input, logger: logger) } - /// Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an Amazon SageMaker Endpoint. + /// Creates a schedule that regularly starts Amazon SageMaker AI Processing Jobs to monitor the data captured for an Amazon SageMaker AI Endpoint. @Sendable @inlinable public func createMonitoringSchedule(_ input: CreateMonitoringScheduleRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMonitoringScheduleResponse { @@ -2373,7 +2373,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an Amazon SageMaker Endpoint. + /// Creates a schedule that regularly starts Amazon SageMaker AI Processing Jobs to monitor the data captured for an Amazon SageMaker AI Endpoint. /// /// Parameters: /// - monitoringScheduleConfig: The configuration object that specifies the monitoring schedule and defines the monitoring job. @@ -2395,7 +2395,7 @@ public struct SageMaker: AWSService { return try await self.createMonitoringSchedule(input, logger: logger) } - /// Creates an SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook. In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance. SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker with a specific algorithm or with a machine learning framework. After receiving the request, SageMaker does the following: Creates a network interface in the SageMaker VPC. 
(Option) If you specified SubnetId, SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC. Launches an EC2 instance of the type specified in the request in the SageMaker VPC. If you specified SubnetId of your VPC, SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it. After creating the notebook instance, SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it. After SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker endpoints, and validate hosted models. For more information, see How It Works. + /// Creates a SageMaker AI notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook. In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. SageMaker AI launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance. SageMaker AI also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker AI with a specific algorithm or with a machine learning framework. After receiving the request, SageMaker AI does the following: Creates a network interface in the SageMaker AI VPC. (Option) If you specified SubnetId, SageMaker AI creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker AI attaches the security group that you specified in the request to the network interface that it creates in your VPC. Launches an EC2 instance of the type specified in the request in the SageMaker AI VPC. If you specified SubnetId of your VPC, SageMaker AI specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it. After creating the notebook instance, SageMaker AI returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it. After SageMaker AI creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker AI endpoints, and validate hosted models. For more information, see How It Works. @Sendable @inlinable public func createNotebookInstance(_ input: CreateNotebookInstanceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateNotebookInstanceOutput { @@ -2408,20 +2408,20 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates an SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook. In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run.
SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance. SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker with a specific algorithm or with a machine learning framework. After receiving the request, SageMaker does the following: Creates a network interface in the SageMaker VPC. (Option) If you specified SubnetId, SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC. Launches an EC2 instance of the type specified in the request in the SageMaker VPC. If you specified SubnetId of your VPC, SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it. After creating the notebook instance, SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it. After SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker endpoints, and validate hosted models. For more information, see How It Works. + /// Creates a SageMaker AI notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook. In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. SageMaker AI launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance. SageMaker AI also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker AI with a specific algorithm or with a machine learning framework. After receiving the request, SageMaker AI does the following: Creates a network interface in the SageMaker AI VPC. (Option) If you specified SubnetId, SageMaker AI creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker AI attaches the security group that you specified in the request to the network interface that it creates in your VPC. Launches an EC2 instance of the type specified in the request in the SageMaker AI VPC. If you specified SubnetId of your VPC, SageMaker AI specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it. After creating the notebook instance, SageMaker AI returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it. After SageMaker AI creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker AI endpoints, and validate hosted models. For more information, see How It Works. /// /// Parameters: /// - acceleratorTypes: This parameter is no longer supported.
Elastic Inference (EI) is no longer available. This parameter was used to specify a list of EI instance types to associate with this notebook instance. - /// - additionalCodeRepositories: An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. - /// - defaultCodeRepository: A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances. - /// - directInternetAccess: Sets whether SageMaker provides internet access to the notebook instance. If you set this to Disabled this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC. For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter. + /// - additionalCodeRepositories: An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. + /// - defaultCodeRepository: A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. + /// - directInternetAccess: Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to Disabled, this notebook instance is able to access resources only in your VPC, and is not able to connect to SageMaker AI training and endpoint services unless you configure a NAT Gateway in your VPC. For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter. /// - instanceMetadataServiceConfiguration: Information on the IMDS configuration of the notebook instance /// - instanceType: The type of ML compute instance to launch for the notebook instance. - /// - kmsKeyId: The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance.
The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide. + /// - kmsKeyId: The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide. /// - lifecycleConfigName: The name of a lifecycle configuration to associate with the notebook instance. For information about lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance. /// - notebookInstanceName: The name of the new notebook instance. /// - platformIdentifier: The platform identifier of the notebook instance runtime environment. - /// - roleArn: When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker Roles. To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. + /// - roleArn: When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker AI assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker AI can perform these tasks. The policy must allow the SageMaker AI service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker AI Roles. To be able to pass this role to SageMaker AI, the caller of this API must have the iam:PassRole permission. /// - rootAccess: Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled. Lifecycle configurations need root access to be able to set up a notebook instance. Because of this, lifecycle configurations associated with a notebook instance always run with root access even if you disable root access for users. /// - securityGroupIds: The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet. /// - subnetId: The ID of the subnet in a VPC to which you would like to have connectivity from your ML compute instance. @@ -2526,7 +2526,7 @@ public struct SageMaker: AWSService { /// - optimizationEnvironment: The environment variables to set in the model container. /// - optimizationJobName: A custom name for the new optimization job. /// - outputConfig: Details for where to store the optimized model that you create with the optimization job. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. During model optimization, Amazon SageMaker needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles.
+ /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. During model optimization, Amazon SageMaker AI needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker AI Roles. /// - stoppingCondition: /// - tags: A list of key-value pairs associated with the optimization job. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide. /// - vpcConfig: A VPC in Amazon VPC that your optimized model has access to. @@ -2704,7 +2704,7 @@ public struct SageMaker: AWSService { return try await self.createPipeline(input, logger: logger) } - /// Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. The JupyterLab session default expiration time is 12 hours. You can configure this value using SessionExpirationDurationInSeconds. + /// Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker AI Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. The JupyterLab session default expiration time is 12 hours. 
You can configure this value using SessionExpirationDurationInSeconds. @Sendable @inlinable public func createPresignedDomainUrl(_ input: CreatePresignedDomainUrlRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePresignedDomainUrlResponse { @@ -2717,7 +2717,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. The JupyterLab session default expiration time is 12 hours. You can configure this value using SessionExpirationDurationInSeconds. + /// Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker AI Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. The JupyterLab session default expiration time is 12 hours. You can configure this value using SessionExpirationDurationInSeconds. /// /// Parameters: /// - domainId: The domain ID. @@ -2783,7 +2783,7 @@ public struct SageMaker: AWSService { return try await self.createPresignedMlflowTrackingServerUrl(input, logger: logger) } - /// Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker console, when you choose Open next to a notebook instance, SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page. 
The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address. The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page. + /// Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker AI console, when you choose Open next to a notebook instance, SageMaker AI opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page. The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address. The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page. @Sendable @inlinable public func createPresignedNotebookInstanceUrl(_ input: CreatePresignedNotebookInstanceUrlInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePresignedNotebookInstanceUrlOutput { @@ -2796,7 +2796,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker console, when you choose Open next to a notebook instance, SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page. The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address. 
The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page. + /// Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker AI console, when you choose Open next to a notebook instance, SageMaker AI opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page. The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address. The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page. /// /// Parameters: /// - notebookInstanceName: The name of the notebook instance. @@ -2959,7 +2959,7 @@ public struct SageMaker: AWSService { return try await self.createSpace(input, logger: logger) } - /// Creates a new Amazon SageMaker Studio Lifecycle Configuration. + /// Creates a new Amazon SageMaker AI Studio Lifecycle Configuration. @Sendable @inlinable public func createStudioLifecycleConfig(_ input: CreateStudioLifecycleConfigRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateStudioLifecycleConfigResponse { @@ -2972,12 +2972,12 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a new Amazon SageMaker Studio Lifecycle Configuration. + /// Creates a new Amazon SageMaker AI Studio Lifecycle Configuration. /// /// Parameters: /// - studioLifecycleConfigAppType: The App type that the Lifecycle Configuration is attached to. - /// - studioLifecycleConfigContent: The content of your Amazon SageMaker Studio Lifecycle Configuration script. This content must be base64 encoded. - /// - studioLifecycleConfigName: The name of the Amazon SageMaker Studio Lifecycle Configuration to create. + /// - studioLifecycleConfigContent: The content of your Amazon SageMaker AI Studio Lifecycle Configuration script. This content must be base64 encoded. + /// - studioLifecycleConfigName: The name of the Amazon SageMaker AI Studio Lifecycle Configuration to create. /// - tags: Tags to be associated with the Lifecycle Configuration. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API. /// - logger: Logger use during operation @inlinable @@ -3714,7 +3714,7 @@ public struct SageMaker: AWSService { return try await self.deleteCodeRepository(input, logger: logger) } - /// Deletes the specified compilation job. This action deletes only the compilation job resource in Amazon SageMaker. 
It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, the compiled model, or the IAM role. You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes STOPPED. + /// Deletes the specified compilation job. This action deletes only the compilation job resource in Amazon SageMaker AI. It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, the compiled model, or the IAM role. You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes STOPPED. @Sendable @inlinable public func deleteCompilationJob(_ input: DeleteCompilationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -3727,7 +3727,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes the specified compilation job. This action deletes only the compilation job resource in Amazon SageMaker. It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, the compiled model, or the IAM role. You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes STOPPED. + /// Deletes the specified compilation job. This action deletes only the compilation job resource in Amazon SageMaker AI. It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, the compiled model, or the IAM role. You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes STOPPED. /// /// Parameters: /// - compilationJobName: The name of the compilation job to delete. @@ -4257,7 +4257,7 @@ public struct SageMaker: AWSService { return try await self.deleteHyperParameterTuningJob(input, logger: logger) } - /// Deletes a SageMaker image and all versions of the image. The container images aren't deleted. + /// Deletes a SageMaker AI image and all versions of the image. The container images aren't deleted. @Sendable @inlinable public func deleteImage(_ input: DeleteImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteImageResponse { @@ -4270,7 +4270,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes a SageMaker image and all versions of the image. The container images aren't deleted. + /// Deletes a SageMaker AI image and all versions of the image. The container images aren't deleted. /// /// Parameters: /// - imageName: The name of the image to delete. @@ -4286,7 +4286,7 @@ public struct SageMaker: AWSService { return try await self.deleteImage(input, logger: logger) } - /// Deletes a version of a SageMaker image. The container image the version represents isn't deleted. + /// Deletes a version of a SageMaker AI image. The container image the version represents isn't deleted. 
@Sendable @inlinable public func deleteImageVersion(_ input: DeleteImageVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteImageVersionResponse { @@ -4299,7 +4299,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes a version of a SageMaker image. The container image the version represents isn't deleted. + /// Deletes a version of a SageMaker AI image. The container image the version represents isn't deleted. /// /// Parameters: /// - alias: The alias of the image to delete. @@ -4437,7 +4437,7 @@ public struct SageMaker: AWSService { return try await self.deleteModel(input, logger: logger) } - /// Deletes an Amazon SageMaker model bias job definition. + /// Deletes an Amazon SageMaker AI model bias job definition. @Sendable @inlinable public func deleteModelBiasJobDefinition(_ input: DeleteModelBiasJobDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -4450,7 +4450,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes an Amazon SageMaker model bias job definition. + /// Deletes an Amazon SageMaker AI model bias job definition. /// /// Parameters: /// - jobDefinitionName: The name of the model bias job definition to delete. @@ -4495,7 +4495,7 @@ public struct SageMaker: AWSService { return try await self.deleteModelCard(input, logger: logger) } - /// Deletes an Amazon SageMaker model explainability job definition. + /// Deletes an Amazon SageMaker AI model explainability job definition. @Sendable @inlinable public func deleteModelExplainabilityJobDefinition(_ input: DeleteModelExplainabilityJobDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -4508,7 +4508,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes an Amazon SageMaker model explainability job definition. + /// Deletes an Amazon SageMaker AI model explainability job definition. /// /// Parameters: /// - jobDefinitionName: The name of the model explainability job definition to delete. @@ -4669,7 +4669,7 @@ public struct SageMaker: AWSService { return try await self.deleteMonitoringSchedule(input, logger: logger) } - /// Deletes an SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API. When you delete a notebook instance, you lose all of your data. SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance. + /// Deletes a SageMaker AI notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API. When you delete a notebook instance, you lose all of your data. SageMaker AI removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance. @Sendable @inlinable public func deleteNotebookInstance(_ input: DeleteNotebookInstanceInput, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -4682,10 +4682,10 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes an SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API. When you delete a notebook instance, you lose all of your data. SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance. + /// Deletes a SageMaker AI notebook instance.
Before you can delete a notebook instance, you must call the StopNotebookInstance API. When you delete a notebook instance, you lose all of your data. SageMaker AI removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance. /// /// Parameters: - /// - notebookInstanceName: The name of the SageMaker notebook instance to delete. + /// - notebookInstanceName: The name of the SageMaker AI notebook instance to delete. /// - logger: Logger use during operation @inlinable public func deleteNotebookInstance( @@ -4881,7 +4881,7 @@ public struct SageMaker: AWSService { return try await self.deleteSpace(input, logger: logger) } - /// Deletes the Amazon SageMaker Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles. + /// Deletes the Amazon SageMaker AI Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles. @Sendable @inlinable public func deleteStudioLifecycleConfig(_ input: DeleteStudioLifecycleConfigRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -4894,10 +4894,10 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes the Amazon SageMaker Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles. + /// Deletes the Amazon SageMaker AI Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles. /// /// Parameters: - /// - studioLifecycleConfigName: The name of the Amazon SageMaker Studio Lifecycle Configuration to delete. + /// - studioLifecycleConfigName: The name of the Amazon SageMaker AI Studio Lifecycle Configuration to delete. /// - logger: Logger use during operation @inlinable public func deleteStudioLifecycleConfig( @@ -6040,7 +6040,7 @@ public struct SageMaker: AWSService { return try await self.describeHyperParameterTuningJob(input, logger: logger) } - /// Describes a SageMaker image. + /// Describes a SageMaker AI image. @Sendable @inlinable public func describeImage(_ input: DescribeImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeImageResponse { @@ -6053,7 +6053,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Describes a SageMaker image. + /// Describes a SageMaker AI image. /// /// Parameters: /// - imageName: The name of the image to describe. @@ -6069,7 +6069,7 @@ public struct SageMaker: AWSService { return try await self.describeImage(input, logger: logger) } - /// Describes a version of a SageMaker image. + /// Describes a version of a SageMaker AI image. @Sendable @inlinable public func describeImageVersion(_ input: DescribeImageVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeImageVersionResponse { @@ -6082,7 +6082,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Describes a version of a SageMaker image. 
+ /// Describes a version of a SageMaker AI image. /// /// Parameters: /// - alias: The alias of the image version. @@ -6835,7 +6835,7 @@ public struct SageMaker: AWSService { return try await self.describeSpace(input, logger: logger) } - /// Describes the Amazon SageMaker Studio Lifecycle Configuration. + /// Describes the Amazon SageMaker AI Studio Lifecycle Configuration. @Sendable @inlinable public func describeStudioLifecycleConfig(_ input: DescribeStudioLifecycleConfigRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeStudioLifecycleConfigResponse { @@ -6848,10 +6848,10 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Describes the Amazon SageMaker Studio Lifecycle Configuration. + /// Describes the Amazon SageMaker AI Studio Lifecycle Configuration. /// /// Parameters: - /// - studioLifecycleConfigName: The name of the Amazon SageMaker Studio Lifecycle Configuration to describe. + /// - studioLifecycleConfigName: The name of the Amazon SageMaker AI Studio Lifecycle Configuration to describe. /// - logger: Logger use during operation @inlinable public func describeStudioLifecycleConfig( @@ -10370,7 +10370,7 @@ public struct SageMaker: AWSService { return try await self.listNotebookInstanceLifecycleConfigs(input, logger: logger) } - /// Returns a list of the SageMaker notebook instances in the requester's account in an Amazon Web Services Region. + /// Returns a list of the SageMaker AI notebook instances in the requester's account in an Amazon Web Services Region. @Sendable @inlinable public func listNotebookInstances(_ input: ListNotebookInstancesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListNotebookInstancesOutput { @@ -10383,7 +10383,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Returns a list of the SageMaker notebook instances in the requester's account in an Amazon Web Services Region. + /// Returns a list of the SageMaker AI notebook instances in the requester's account in an Amazon Web Services Region. /// /// Parameters: /// - additionalCodeRepositoryEquals: A filter that returns only notebook instances associated with the specified git repository. @@ -10928,7 +10928,7 @@ public struct SageMaker: AWSService { return try await self.listStageDevices(input, logger: logger) } - /// Lists the Amazon SageMaker Studio Lifecycle Configurations in your Amazon Web Services Account. + /// Lists the Amazon SageMaker AI Studio Lifecycle Configurations in your Amazon Web Services Account. @Sendable @inlinable public func listStudioLifecycleConfigs(_ input: ListStudioLifecycleConfigsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListStudioLifecycleConfigsResponse { @@ -10941,7 +10941,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Lists the Amazon SageMaker Studio Lifecycle Configurations in your Amazon Web Services Account. + /// Lists the Amazon SageMaker AI Studio Lifecycle Configurations in your Amazon Web Services Account. /// /// Parameters: /// - appTypeEquals: A parameter to search for the App Type to which the Lifecycle Configuration is attached. @@ -11962,7 +11962,7 @@ public struct SageMaker: AWSService { return try await self.startMonitoringSchedule(input, logger: logger) } - /// Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, SageMaker sets the notebook instance status to InService.
A notebook instance's status must be InService before you can connect to your Jupyter notebook. + /// Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, SageMaker AI sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook. @Sendable @inlinable public func startNotebookInstance(_ input: StartNotebookInstanceInput, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -11975,7 +11975,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, SageMaker sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook. + /// Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, SageMaker AI sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook. /// /// Parameters: /// - notebookInstanceName: The name of the notebook instance to start. @@ -12067,7 +12067,7 @@ public struct SageMaker: AWSService { return try await self.stopAutoMLJob(input, logger: logger) } - /// Stops a model compilation job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal. When it receives a StopCompilationJob request, Amazon SageMaker changes the CompilationJobStatus of the job to Stopping. After Amazon SageMaker stops the job, it sets the CompilationJobStatus to Stopped. + /// Stops a model compilation job. To stop a job, Amazon SageMaker AI sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal. When it receives a StopCompilationJob request, Amazon SageMaker AI changes the CompilationJobStatus of the job to Stopping. After Amazon SageMaker AI stops the job, it sets the CompilationJobStatus to Stopped. @Sendable @inlinable public func stopCompilationJob(_ input: StopCompilationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -12080,7 +12080,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Stops a model compilation job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal. When it receives a StopCompilationJob request, Amazon SageMaker changes the CompilationJobStatus of the job to Stopping. After Amazon SageMaker stops the job, it sets the CompilationJobStatus to Stopped. + /// Stops a model compilation job. To stop a job, Amazon SageMaker AI sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal. When it receives a StopCompilationJob request, Amazon SageMaker AI changes the CompilationJobStatus of the job to Stopping. After Amazon SageMaker AI stops the job, it sets the CompilationJobStatus to Stopped. /// /// Parameters: /// - compilationJobName: The name of the model compilation job to stop.
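The doc comments in the hunk above spell out a stop-then-delete lifecycle for compilation jobs: DeleteCompilationJob succeeds only once the status is COMPLETED, FAILED, or STOPPED, so an in-progress job must first be stopped and polled. A minimal Swift sketch of that flow, using the parameter-based convenience overloads this diff shows for stopCompilationJob and deleteCompilationJob; the job name is hypothetical, and the describeCompilationJob overload, the .stopping enum case, and the poll interval are assumptions rather than anything this diff confirms:

import SotoSageMaker

// Sketch only: client construction varies by soto version and is reduced
// to a default initializer here.
let client = AWSClient()
let sageMaker = SageMaker(client: client, region: .useast1)
let jobName = "my-compilation-job"  // hypothetical name

// SageMaker AI sends the algorithm SIGTERM (then SIGKILL if needed) and
// moves the job's CompilationJobStatus to Stopping.
try await sageMaker.stopCompilationJob(compilationJobName: jobName)

// Poll until the job leaves Stopping; deletion is allowed only once the
// status is COMPLETED, FAILED, or STOPPED.
while try await sageMaker.describeCompilationJob(compilationJobName: jobName)
    .compilationJobStatus == .stopping {
    try await Task.sleep(nanoseconds: 15_000_000_000)  // wait 15s between polls
}
try await sageMaker.deleteCompilationJob(compilationJobName: jobName)
try await client.shutdown()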
@@ -12343,7 +12343,7 @@ public struct SageMaker: AWSService { return try await self.stopMonitoringSchedule(input, logger: logger) } - /// Terminates the ML compute instance. Before terminating the instance, SageMaker disconnects the ML storage volume from it. SageMaker preserves the ML storage volume. SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance. To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work. + /// Terminates the ML compute instance. Before terminating the instance, SageMaker AI disconnects the ML storage volume from it. SageMaker AI preserves the ML storage volume. SageMaker AI stops charging you for the ML compute instance when you call StopNotebookInstance. To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work. @Sendable @inlinable public func stopNotebookInstance(_ input: StopNotebookInstanceInput, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -12356,7 +12356,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Terminates the ML compute instance. Before terminating the instance, SageMaker disconnects the ML storage volume from it. SageMaker preserves the ML storage volume. SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance. To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work. + /// Terminates the ML compute instance. Before terminating the instance, SageMaker AI disconnects the ML storage volume from it. SageMaker AI preserves the ML storage volume. SageMaker AI stops charging you for the ML compute instance when you call StopNotebookInstance. To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work. /// /// Parameters: /// - notebookInstanceName: The name of the notebook instance to terminate. @@ -12942,7 +12942,7 @@ public struct SageMaker: AWSService { /// Updates the default settings for new user profiles in the domain. /// /// Parameters: - /// - appNetworkAccessType: Specifies the VPC used for non-EFS traffic. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access. VpcOnly - All Studio traffic is through the specified VPC and subnets. This configuration can only be modified if there are no apps in the InService, Pending, or Deleting state. The configuration cannot be updated if DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided as part of the same request. + /// - appNetworkAccessType: Specifies the VPC used for non-EFS traffic. 
PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access. VpcOnly - All Studio traffic is through the specified VPC and subnets. This configuration can only be modified if there are no apps in the InService, Pending, or Deleting state. The configuration cannot be updated if DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided as part of the same request. /// - appSecurityGroupManagement: The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. If setting up the domain for use with RStudio, this value must be set to Service. /// - defaultSpaceSettings: The default settings for shared spaces that users create in the domain. /// - defaultUserSettings: A collection of settings. @@ -13204,7 +13204,7 @@ public struct SageMaker: AWSService { return try await self.updateHub(input, logger: logger) } - /// Updates the properties of a SageMaker image. To change the image's tags, use the AddTags and DeleteTags APIs. + /// Updates the properties of a SageMaker AI image. To change the image's tags, use the AddTags and DeleteTags APIs. @Sendable @inlinable public func updateImage(_ input: UpdateImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateImageResponse { @@ -13217,14 +13217,14 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Updates the properties of a SageMaker image. To change the image's tags, use the AddTags and DeleteTags APIs. + /// Updates the properties of a SageMaker AI image. To change the image's tags, use the AddTags and DeleteTags APIs. /// /// Parameters: /// - deleteProperties: A list of properties to delete. Only the Description and DisplayName properties can be deleted. /// - description: The new description for the image. /// - displayName: The new display name for the image. /// - imageName: The name of the image to update. - /// - roleArn: The new ARN for the IAM role that enables Amazon SageMaker to perform tasks on your behalf. + /// - roleArn: The new ARN for the IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. /// - logger: Logger use during operation @inlinable public func updateImage( @@ -13245,7 +13245,7 @@ public struct SageMaker: AWSService { return try await self.updateImage(input, logger: logger) } - /// Updates the properties of a SageMaker image version. + /// Updates the properties of a SageMaker AI image version. @Sendable @inlinable public func updateImageVersion(_ input: UpdateImageVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateImageVersionResponse { @@ -13258,7 +13258,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Updates the properties of a SageMaker image version. + /// Updates the properties of a SageMaker AI image version. /// /// Parameters: /// - alias: The alias of the image version. @@ -13266,7 +13266,7 @@ public struct SageMaker: AWSService { /// - aliasesToDelete: A list of aliases to delete. /// - horovod: Indicates Horovod compatibility. /// - imageName: The name of the image. - /// - jobType: Indicates SageMaker job type compatibility. TRAINING: The image version is compatible with SageMaker training jobs. 
INFERENCE: The image version is compatible with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels. + /// - jobType: Indicates SageMaker AI job type compatibility. TRAINING: The image version is compatible with SageMaker AI training jobs. INFERENCE: The image version is compatible with SageMaker AI inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels. /// - mlFramework: The machine learning framework vended in the image version. /// - processor: Indicates CPU or GPU compatibility. CPU: The image version is compatible with CPU. GPU: The image version is compatible with GPU. /// - programmingLang: The supported programming language and its version. @@ -13640,8 +13640,8 @@ public struct SageMaker: AWSService { /// /// Parameters: /// - acceleratorTypes: This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types to associate with this notebook instance. - /// - additionalCodeRepositories: An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. - /// - defaultCodeRepository: The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// - additionalCodeRepositories: An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. + /// - defaultCodeRepository: The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. /// - disassociateAcceleratorTypes: This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types to remove from this notebook instance. /// - disassociateAdditionalCodeRepositories: A list of names or URLs of the default Git repositories to remove from this notebook instance. This operation is idempotent. 
If you specify a Git repository that is not associated with the notebook instance when you call this method, it does not throw an error. /// - disassociateDefaultCodeRepository: The name or URL of the default Git repository to remove from this notebook instance. This operation is idempotent. If you specify a Git repository that is not associated with the notebook instance when you call this method, it does not throw an error. @@ -13650,9 +13650,9 @@ public struct SageMaker: AWSService { /// - instanceType: The Amazon ML compute instance type. /// - lifecycleConfigName: The name of a lifecycle configuration to associate with the notebook instance. For information about lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance. /// - notebookInstanceName: The name of the notebook instance to update. - /// - roleArn: The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access the notebook instance. For more information, see SageMaker Roles. To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. + /// - roleArn: The Amazon Resource Name (ARN) of the IAM role that SageMaker AI can assume to access the notebook instance. For more information, see SageMaker AI Roles. To be able to pass this role to SageMaker AI, the caller of this API must have the iam:PassRole permission. /// - rootAccess: Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled. If you set this to Disabled, users don't have root access on the notebook instance, but lifecycle configuration scripts still run with root permissions. - /// - volumeSizeInGB: The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. ML storage volumes are encrypted, so SageMaker can't determine the amount of available free space on the volume. Because of this, you can increase the volume size when you update a notebook instance, but you can't decrease the volume size. If you want to decrease the size of the ML storage volume in use, create a new notebook instance with the desired size. + /// - volumeSizeInGB: The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. ML storage volumes are encrypted, so SageMaker AI can't determine the amount of available free space on the volume. Because of this, you can increase the volume size when you update a notebook instance, but you can't decrease the volume size. If you want to decrease the size of the ML storage volume in use, create a new notebook instance with the desired size. /// - logger: Logger use during operation @inlinable public func updateNotebookInstance( @@ -13896,7 +13896,7 @@ public struct SageMaker: AWSService { return try await self.updateProject(input, logger: logger) } - /// Updates the settings of a space. + /// Updates the settings of a space. You can't edit the app type of a space in the SpaceSettings. @Sendable @inlinable public func updateSpace(_ input: UpdateSpaceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateSpaceResponse { @@ -13909,7 +13909,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Updates the settings of a space. + /// Updates the settings of a space. You can't edit the app type of a space in the SpaceSettings. /// /// Parameters: /// - domainId: The ID of the associated domain.
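For context, a minimal usage sketch (not generated code) of the UpdateImage operation documented in the hunks above, assuming an enclosing async context, a Soto 7.x client setup, and placeholder image and description values:

import SotoSageMaker // re-exports SotoCore, so AWSClient and Region are available

// Sketch only: update a SageMaker AI image's description through the
// generated convenience overload whose parameters are documented above.
let client = AWSClient()
let sageMaker = SageMaker(client: client, region: .useast1)
let response = try await sageMaker.updateImage(
    description: "Rebuilt nightly training image", // placeholder value
    imageName: "my-custom-image" // placeholder name
)
print(response.imageArn ?? "no image ARN returned")
try await client.shutdown()

Changing the image's tags would instead go through the AddTags and DeleteTags APIs, as the doc comment above notes.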
diff --git a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift index 9071012b96..99c73c0b88 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift @@ -671,6 +671,15 @@ extension SageMaker { case mlC5N9Xlarge = "ml.c5n.9xlarge" case mlC5NLarge = "ml.c5n.large" case mlC5Xlarge = "ml.c5.xlarge" + case mlC6I12Xlarge = "ml.c6i.12xlarge" + case mlC6I16Xlarge = "ml.c6i.16xlarge" + case mlC6I24Xlarge = "ml.c6i.24xlarge" + case mlC6I2Xlarge = "ml.c6i.2xlarge" + case mlC6I32Xlarge = "ml.c6i.32xlarge" + case mlC6I4Xlarge = "ml.c6i.4xlarge" + case mlC6I8Xlarge = "ml.c6i.8xlarge" + case mlC6ILarge = "ml.c6i.large" + case mlC6IXlarge = "ml.c6i.xlarge" case mlG512Xlarge = "ml.g5.12xlarge" case mlG516Xlarge = "ml.g5.16xlarge" case mlG524Xlarge = "ml.g5.24xlarge" @@ -705,11 +714,29 @@ extension SageMaker { case mlM58Xlarge = "ml.m5.8xlarge" case mlM5Large = "ml.m5.large" case mlM5Xlarge = "ml.m5.xlarge" + case mlM6I12Xlarge = "ml.m6i.12xlarge" + case mlM6I16Xlarge = "ml.m6i.16xlarge" + case mlM6I24Xlarge = "ml.m6i.24xlarge" + case mlM6I2Xlarge = "ml.m6i.2xlarge" + case mlM6I32Xlarge = "ml.m6i.32xlarge" + case mlM6I4Xlarge = "ml.m6i.4xlarge" + case mlM6I8Xlarge = "ml.m6i.8xlarge" + case mlM6ILarge = "ml.m6i.large" + case mlM6IXlarge = "ml.m6i.xlarge" case mlP4D24Xlarge = "ml.p4d.24xlarge" case mlP4De24Xlarge = "ml.p4de.24xlarge" case mlP548Xlarge = "ml.p5.48xlarge" case mlP5E48Xlarge = "ml.p5e.48xlarge" case mlP5En48Xlarge = "ml.p5en.48xlarge" + case mlR6I12Xlarge = "ml.r6i.12xlarge" + case mlR6I16Xlarge = "ml.r6i.16xlarge" + case mlR6I24Xlarge = "ml.r6i.24xlarge" + case mlR6I2Xlarge = "ml.r6i.2xlarge" + case mlR6I32Xlarge = "ml.r6i.32xlarge" + case mlR6I4Xlarge = "ml.r6i.4xlarge" + case mlR6I8Xlarge = "ml.r6i.8xlarge" + case mlR6ILarge = "ml.r6i.large" + case mlR6IXlarge = "ml.r6i.xlarge" case mlT32Xlarge = "ml.t3.2xlarge" case mlT3Large = "ml.t3.large" case mlT3Medium = "ml.t3.medium" @@ -2432,6 +2459,7 @@ extension SageMaker { case mlP548Xlarge = "ml.p5.48xlarge" case mlP5E48Xlarge = "ml.p5e.48xlarge" case mlP5En48Xlarge = "ml.p5en.48xlarge" + case mlTrn132Xlarge = "ml.trn1.32xlarge" case mlTrn248Xlarge = "ml.trn2.48xlarge" public var description: String { return self.rawValue } } @@ -3196,6 +3224,8 @@ extension SageMaker { case mlR7I8Xlarge = "ml.r7i.8xlarge" case mlR7ILarge = "ml.r7i.large" case mlR7IXlarge = "ml.r7i.xlarge" + case mlTrn12Xlarge = "ml.trn1.2xlarge" + case mlTrn132Xlarge = "ml.trn1.32xlarge" public var description: String { return self.rawValue } } @@ -3882,14 +3912,17 @@ extension SageMaker { public struct AdditionalS3DataSource: AWSEncodableShape & AWSDecodableShape { /// The type of compression used for an additional data source used in inference or training. Specify None if your additional data source is not compressed. public let compressionType: CompressionType? + /// The ETag associated with S3 URI. + public let eTag: String? /// The data type of the additional data source that you specify for use in inference or training. public let s3DataType: AdditionalS3DataSourceDataType? /// The uniform resource identifier (URI) used to identify an additional data source used in inference or training. public let s3Uri: String? @inlinable - public init(compressionType: CompressionType? = nil, s3DataType: AdditionalS3DataSourceDataType? = nil, s3Uri: String? = nil) { + public init(compressionType: CompressionType? = nil, eTag: String? 
= nil, s3DataType: AdditionalS3DataSourceDataType? = nil, s3Uri: String? = nil) { self.compressionType = compressionType + self.eTag = eTag self.s3DataType = s3DataType self.s3Uri = s3Uri } @@ -3901,6 +3934,7 @@ extension SageMaker { private enum CodingKeys: String, CodingKey { case compressionType = "CompressionType" + case eTag = "ETag" case s3DataType = "S3DataType" case s3Uri = "S3Uri" } @@ -4232,7 +4266,7 @@ extension SageMaker { public let creationTime: Date? /// The configuration for the file system and the runtime, such as the environment variables and entry point. public let jupyterLabAppImageConfig: JupyterLabAppImageConfig? - /// The configuration for the file system and kernels in the SageMaker image. + /// The configuration for the file system and kernels in the SageMaker AI image. public let kernelGatewayImageConfig: KernelGatewayImageConfig? /// When the AppImageConfig was last modified. public let lastModifiedTime: Date? @@ -5147,7 +5181,7 @@ extension SageMaker { } public struct AutoMLS3DataSource: AWSEncodableShape & AWSDecodableShape { - /// The data type. If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training. The S3Prefix should have the following format: s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training. A ManifestFile should have the format shown below: [ {"prefix": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/"}, "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1", "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2", ... "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N" ] If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile is available for V2 API jobs only (for example, for jobs created by calling CreateAutoMLJobV2). Here is a minimal, single-record example of an AugmentedManifestFile: {"source-ref": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/cats/cat.jpg", "label-metadata": {"class-name": "cat" } For more information on AugmentedManifestFile, see Provide Dataset Metadata to Training Jobs with an Augmented Manifest File. + /// The data type. If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker AI uses all objects that match the specified key name prefix for model training. The S3Prefix should have the following format: s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker AI to use for model training. A ManifestFile should have the format shown below: [ {"prefix": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/"}, "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1", "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2", ... "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N" ] If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile is available for V2 API jobs only (for example, for jobs created by calling CreateAutoMLJobV2). 
Here is a minimal, single-record example of an AugmentedManifestFile: {"source-ref": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/cats/cat.jpg", "label-metadata": {"class-name": "cat" } } For more information on AugmentedManifestFile, see Provide Dataset Metadata to Training Jobs with an Augmented Manifest File. public let s3DataType: AutoMLS3DataType? /// The URL to the Amazon S3 data source. The Uri refers to the Amazon S3 prefix or ManifestFile depending on the data type. public let s3Uri: String? @@ -5778,9 +5812,9 @@ extension SageMaker { } public struct CaptureContentTypeHeader: AWSEncodableShape & AWSDecodableShape { - /// The list of all content type headers that Amazon SageMaker will treat as CSV and capture accordingly. + /// The list of all content type headers that Amazon SageMaker AI will treat as CSV and capture accordingly. public let csvContentTypes: [String]? - /// The list of all content type headers that SageMaker will treat as JSON and capture accordingly. + /// The list of all content type headers that SageMaker AI will treat as JSON and capture accordingly. public let jsonContentTypes: [String]? @inlinable @@ -6390,6 +6424,7 @@ extension SageMaker { try self.validate(self.executionRole, name: "executionRole", parent: name, max: 2048) try self.validate(self.executionRole, name: "executionRole", parent: name, min: 20) try self.validate(self.executionRole, name: "executionRole", parent: name, pattern: "^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + try self.validate(self.instanceCount, name: "instanceCount", parent: name, max: 6758) try self.validate(self.instanceCount, name: "instanceCount", parent: name, min: 0) try self.validate(self.instanceGroupName, name: "instanceGroupName", parent: name, max: 63) try self.validate(self.instanceGroupName, name: "instanceGroupName", parent: name, min: 1) @@ -6507,11 +6542,13 @@ extension SageMaker { public let privateDnsHostname: String? /// The private primary IP address of the SageMaker HyperPod cluster node. public let privatePrimaryIp: String? + /// The private primary IPv6 address of the SageMaker HyperPod cluster node. + public let privatePrimaryIpv6: String? /// The number of threads per CPU core you specified under CreateCluster. public let threadsPerCore: Int? @inlinable - public init(instanceGroupName: String? = nil, instanceId: String? = nil, instanceStatus: ClusterInstanceStatusDetails? = nil, instanceStorageConfigs: [ClusterInstanceStorageConfig]? = nil, instanceType: ClusterInstanceType? = nil, launchTime: Date? = nil, lifeCycleConfig: ClusterLifeCycleConfig? = nil, overrideVpcConfig: VpcConfig? = nil, placement: ClusterInstancePlacement? = nil, privateDnsHostname: String? = nil, privatePrimaryIp: String? = nil, threadsPerCore: Int? = nil) { + public init(instanceGroupName: String? = nil, instanceId: String? = nil, instanceStatus: ClusterInstanceStatusDetails? = nil, instanceStorageConfigs: [ClusterInstanceStorageConfig]? = nil, instanceType: ClusterInstanceType? = nil, launchTime: Date? = nil, lifeCycleConfig: ClusterLifeCycleConfig? = nil, overrideVpcConfig: VpcConfig? = nil, placement: ClusterInstancePlacement? = nil, privateDnsHostname: String? = nil, privatePrimaryIp: String? = nil, privatePrimaryIpv6: String? = nil, threadsPerCore: Int?
= nil) { self.instanceGroupName = instanceGroupName self.instanceId = instanceId self.instanceStatus = instanceStatus @@ -6523,6 +6560,7 @@ extension SageMaker { self.placement = placement self.privateDnsHostname = privateDnsHostname self.privatePrimaryIp = privatePrimaryIp + self.privatePrimaryIpv6 = privatePrimaryIpv6 self.threadsPerCore = threadsPerCore } @@ -6538,6 +6576,7 @@ extension SageMaker { case placement = "Placement" case privateDnsHostname = "PrivateDnsHostname" case privatePrimaryIp = "PrivatePrimaryIp" + case privatePrimaryIpv6 = "PrivatePrimaryIpv6" case threadsPerCore = "ThreadsPerCore" } } @@ -7555,7 +7594,7 @@ extension SageMaker { public let appType: AppType? /// The domain ID. public let domainId: String? - /// The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error. + /// The instance type and the Amazon Resource Name (ARN) of the SageMaker AI image created on the instance. The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error. public let resourceSpec: ResourceSpec? /// The name of the space. If this value is not set, then UserProfileName must be set. public let spaceName: String? @@ -8043,9 +8082,9 @@ extension SageMaker { public let modelPackageVersionArn: String? /// Provides information about the output location for the compiled model and the target device the model runs on. public let outputConfig: OutputConfig? - /// The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. During model compilation, Amazon SageMaker needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles. + /// The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. During model compilation, Amazon SageMaker AI needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker AI Roles. public let roleArn: String? - /// Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs. + /// Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker AI ends the compilation job. Use this API to cap model training costs. 
public let stoppingCondition: StoppingCondition? /// An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources. public let tags: [Tag]? @@ -8097,7 +8136,7 @@ extension SageMaker { } public struct CreateCompilationJobResponse: AWSDecodableShape { - /// If the action is successful, the service sends back an HTTP 200 response. Amazon SageMaker returns the following data in JSON format: CompilationJobArn: The Amazon Resource Name (ARN) of the compiled job. + /// If the action is successful, the service sends back an HTTP 200 response. Amazon SageMaker AI returns the following data in JSON format: CompilationJobArn: The Amazon Resource Name (ARN) of the compiled job. public let compilationJobArn: String? @inlinable @@ -8264,7 +8303,7 @@ extension SageMaker { public let jobResources: MonitoringResources? /// Specifies networking configuration for the monitoring job. public let networkConfig: MonitoringNetworkConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. @@ -8384,7 +8423,7 @@ extension SageMaker { } public struct CreateDomainRequest: AWSEncodableShape { - /// Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets + /// Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets public let appNetworkAccessType: AppNetworkAccessType? /// The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. If setting up the domain for use with RStudio, this value must be set to Service. public let appSecurityGroupManagement: AppSecurityGroupManagement? @@ -8400,7 +8439,7 @@ extension SageMaker { public let domainSettings: DomainSettings? /// Use KmsKeyId. public let homeEfsFileSystemKmsKeyId: String? - /// SageMaker uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to the domain with an Amazon Web Services managed key by default. For more control, specify a customer managed key. + /// SageMaker AI uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to the domain with an Amazon Web Services managed key by default. For more control, specify a customer managed key. public let kmsKeyId: String? /// The VPC subnets that the domain uses for communication. public let subnetIds: [String]? @@ -8670,7 +8709,7 @@ extension SageMaker { public let enableNetworkIsolation: Bool? /// The name of the endpoint configuration. You specify this name in a CreateEndpoint request. 
public let endpointConfigName: String? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform actions on your behalf. For more information, see SageMaker Roles. To be able to pass this role to Amazon SageMaker, the caller of this action must have the iam:PassRole permission. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform actions on your behalf. For more information, see SageMaker AI Roles. To be able to pass this role to Amazon SageMaker AI, the caller of this action must have the iam:PassRole permission. public let executionRoleArn: String? /// A member of CreateEndpointConfig that enables explainers. public let explainerConfig: ExplainerConfig? @@ -9274,7 +9313,7 @@ extension SageMaker { public let displayName: String? /// The name of the image. Must be unique to your account. public let imageName: String? - /// The ARN of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. + /// The ARN of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. public let roleArn: String? /// A list of tags to apply to the image. public let tags: [Tag]? @@ -9341,7 +9380,7 @@ extension SageMaker { public let horovod: Bool? /// The ImageName of the Image to create a version of. public let imageName: String? - /// Indicates SageMaker job type compatibility. TRAINING: The image version is compatible with SageMaker training jobs. INFERENCE: The image version is compatible with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels. + /// Indicates SageMaker AI job type compatibility. TRAINING: The image version is compatible with SageMaker AI training jobs. INFERENCE: The image version is compatible with SageMaker AI inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels. public let jobType: JobType? /// The machine learning framework vended in the image version. public let mlFramework: String? @@ -9829,7 +9868,7 @@ extension SageMaker { public let modelBiasJobOutputConfig: MonitoringOutputConfig? /// Networking options for a model bias job. public let networkConfig: MonitoringNetworkConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. @@ -10017,7 +10056,7 @@ extension SageMaker { public let modelExplainabilityJobOutputConfig: MonitoringOutputConfig? /// Networking options for a model explainability job. public let networkConfig: MonitoringNetworkConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. 
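For context, a hypothetical sketch (not generated code) of the JobType compatibility flag documented in the CreateImageVersionRequest hunk above: registering an image version that is compatible with SageMaker AI training jobs. The ECR path, image name, and explicit client token are placeholder assumptions.

import Foundation
import SotoSageMaker

// Sketch only: mark a new image version as compatible with SageMaker AI
// training jobs; .inference and .notebookKernel cover the other two
// compatibility values described above.
let request = SageMaker.CreateImageVersionRequest(
    baseImage: "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-image:latest", // placeholder
    clientToken: UUID().uuidString,
    imageName: "my-custom-image", // placeholder
    jobType: .training
)

Passing this request to createImageVersion(_:logger:) would then create the version.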
@@ -10383,7 +10422,7 @@ extension SageMaker { public let modelQualityJobOutputConfig: MonitoringOutputConfig? /// Specifies the network configuration for the monitoring job. public let networkConfig: MonitoringNetworkConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. @@ -10501,17 +10540,17 @@ extension SageMaker { public struct CreateNotebookInstanceInput: AWSEncodableShape { /// This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of EI instance types to associate with this notebook instance. public let acceleratorTypes: [NotebookInstanceAcceleratorType]? - /// An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let additionalCodeRepositories: [String]? - /// A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let defaultCodeRepository: String? - /// Sets whether SageMaker provides internet access to the notebook instance. If you set this to Disabled this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC. For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter. 
+ /// Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to Disabled this notebook instance is able to access resources only in your VPC, and is not able to connect to SageMaker AI training and endpoint services unless you configure a NAT Gateway in your VPC. For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter. public let directInternetAccess: DirectInternetAccess? /// Information on the IMDS configuration of the notebook instance public let instanceMetadataServiceConfiguration: InstanceMetadataServiceConfiguration? /// The type of ML compute instance to launch for the notebook instance. public let instanceType: InstanceType? - /// The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide. + /// The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide. public let kmsKeyId: String? /// The name of a lifecycle configuration to associate with the notebook instance. For information about lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance. public let lifecycleConfigName: String? @@ -10519,7 +10558,7 @@ extension SageMaker { public let notebookInstanceName: String? /// The platform identifier of the notebook instance runtime environment. public let platformIdentifier: String? - /// When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker Roles. To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. + /// When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker AI assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker AI can perform these tasks. The policy must allow the SageMaker AI service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker AI Roles. To be able to pass this role to SageMaker AI, the caller of this API must have the iam:PassRole permission. public let roleArn: String? /// Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled. Lifecycle configurations need root access to be able to set up a notebook instance. Because of this, lifecycle configurations associated with a notebook instance always run with root access even if you disable root access for users. public let rootAccess: RootAccess? @@ -10685,7 +10724,7 @@ extension SageMaker { public let optimizationJobName: String? /// Details for where to store the optimized model that you create with the optimization job.
public let outputConfig: OptimizationJobOutputConfig? - /// The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. During model optimization, Amazon SageMaker needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles. + /// The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. During model optimization, Amazon SageMaker AI needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker AI Roles. public let roleArn: String? public let stoppingCondition: StoppingCondition? /// A list of key-value pairs associated with the optimization job. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide. @@ -11348,9 +11387,9 @@ extension SageMaker { public struct CreateStudioLifecycleConfigRequest: AWSEncodableShape { /// The App type that the Lifecycle Configuration is attached to. public let studioLifecycleConfigAppType: StudioLifecycleConfigAppType? - /// The content of your Amazon SageMaker Studio Lifecycle Configuration script. This content must be base64 encoded. + /// The content of your Amazon SageMaker AI Studio Lifecycle Configuration script. This content must be base64 encoded. public let studioLifecycleConfigContent: String? - /// The name of the Amazon SageMaker Studio Lifecycle Configuration to create. + /// The name of the Amazon SageMaker AI Studio Lifecycle Configuration to create. public let studioLifecycleConfigName: String? /// Tags to be associated with the Lifecycle Configuration. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API. public let tags: [Tag]? @@ -12156,7 +12195,7 @@ extension SageMaker { } public struct DataCaptureConfig: AWSEncodableShape & AWSDecodableShape { - /// Configuration specifying how to treat different headers. If no headers are specified SageMaker will by default base64 encode when capturing the data. + /// Configuration specifying how to treat different headers. If no headers are specified SageMaker AI will by default base64 encode when capturing the data. public let captureContentTypeHeader: CaptureContentTypeHeader? /// Specifies data Model Monitor will capture. You can configure whether to collect only input, only output, or both public let captureOptions: [CaptureOption]? @@ -12164,9 +12203,9 @@ extension SageMaker { public let destinationS3Uri: String? /// Whether data capture should be enabled or disabled (defaults to enabled). public let enableCapture: Bool? - /// The percentage of requests SageMaker will capture. A lower value is recommended for Endpoints with high traffic. + /// The percentage of requests SageMaker AI will capture. A lower value is recommended for Endpoints with high traffic. public let initialSamplingPercentage: Int? 
- /// The Amazon Resource Name (ARN) of an Key Management Service key that SageMaker uses to encrypt the captured data at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: alias/ExampleAlias Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias + /// The Amazon Resource Name (ARN) of a Key Management Service key that SageMaker AI uses to encrypt the captured data at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: alias/ExampleAlias Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias public let kmsKeyId: String? @inlinable @@ -12622,7 +12661,7 @@ extension SageMaker { } public struct DefaultSpaceSettings: AWSEncodableShape & AWSDecodableShape { - /// The settings for assigning a custom file system to a domain. Permitted users can access this file system in Amazon SageMaker Studio. + /// The settings for assigning a custom file system to a domain. Permitted users can access this file system in Amazon SageMaker AI Studio. public let customFileSystemConfigs: [CustomFileSystemConfig]? public let customPosixUserConfig: CustomPosixUserConfig? /// The ARN of the execution role for the space. @@ -13713,7 +13752,7 @@ extension SageMaker { } public struct DeleteNotebookInstanceInput: AWSEncodableShape { - /// The name of the SageMaker notebook instance to delete. + /// The name of the SageMaker AI notebook instance to delete. public let notebookInstanceName: String? @inlinable @@ -13897,7 +13936,7 @@ extension SageMaker { } public struct DeleteStudioLifecycleConfigRequest: AWSEncodableShape { - /// The name of the Amazon SageMaker Studio Lifecycle Configuration to delete. + /// The name of the Amazon SageMaker AI Studio Lifecycle Configuration to delete. public let studioLifecycleConfigName: String? @inlinable @@ -14526,7 +14565,7 @@ extension SageMaker { public let appType: AppType? /// The lifecycle configuration that runs before the default lifecycle configuration public let builtInLifecycleConfigArn: String? - /// The creation time of the application. After an application has been shut down for 24 hours, SageMaker deletes all metadata for the application. To be considered an update and retain application metadata, applications must be restarted within 24 hours after the previous application has been shut down. After this time window, creation of an application is considered a new application rather than an update of the previous application. + /// The creation time of the application. After an application has been shut down for 24 hours, SageMaker AI deletes all metadata for the application. To be considered an update and retain application metadata, applications must be restarted within 24 hours after the previous application has been shut down. After this time window, creation of an application is considered a new application rather than an update of the previous application. public let creationTime: Date? /// The domain ID. public let domainId: String? @@ -14534,9 +14573,9 @@ extension SageMaker { public let failureReason: String? /// The timestamp of the last health check. public let lastHealthCheckTimestamp: Date? - /// The timestamp of the last user's activity.
LastUserActivityTimestamp is also updated when SageMaker performs health checks without user activity. As a result, this value is set to the same value as LastHealthCheckTimestamp. + /// The timestamp of the last user's activity. LastUserActivityTimestamp is also updated when SageMaker AI performs health checks without user activity. As a result, this value is set to the same value as LastHealthCheckTimestamp. public let lastUserActivityTimestamp: Date? - /// The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. + /// The instance type and the Amazon Resource Name (ARN) of the SageMaker AI image created on the instance. public let resourceSpec: ResourceSpec? /// The name of the space. If this value is not set, then UserProfileName must be set. public let spaceName: String? @@ -14684,7 +14723,7 @@ extension SageMaker { public let autoMLJobSecondaryStatus: AutoMLJobSecondaryStatus? /// Returns the status of the AutoML job. public let autoMLJobStatus: AutoMLJobStatus? - /// The best model candidate selected by SageMaker Autopilot using both the best objective metric and lowest InferenceLatency for an experiment. + /// The best model candidate selected by SageMaker AI Autopilot using both the best objective metric and lowest InferenceLatency for an experiment. public let bestCandidate: AutoMLCandidate? /// Returns the creation time of the AutoML job. public let creationTime: Date? @@ -15142,7 +15181,7 @@ extension SageMaker { } public struct DescribeCompilationJobResponse: AWSDecodableShape { - /// The time when the model compilation job on a compilation job instance ended. For a successful or stopped job, this is when the job's model artifacts have finished uploading. For a failed job, this is when Amazon SageMaker detected that the job failed. + /// The time when the model compilation job on a compilation job instance ended. For a successful or stopped job, this is when the job's model artifacts have finished uploading. For a failed job, this is when Amazon SageMaker AI detected that the job failed. public let compilationEndTime: Date? /// The Amazon Resource Name (ARN) of the model compilation job. public let compilationJobArn: String? @@ -15172,9 +15211,9 @@ extension SageMaker { public let modelPackageVersionArn: String? /// Information about the output location for the compiled model and the target device that the model runs on. public let outputConfig: OutputConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker assumes to perform the model compilation job. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI assumes to perform the model compilation job. public let roleArn: String? - /// Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs. + /// Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker AI ends the compilation job. Use this API to cap model training costs. public let stoppingCondition: StoppingCondition? /// A VpcConfig object that specifies the VPC that you want your compilation job to connect to. Control access to your models by configuring the VPC. For more information, see Protect Compilation Jobs by Using an Amazon Virtual Private Cloud. public let vpcConfig: NeoVpcConfig? @@ -15422,7 +15461,7 @@ extension SageMaker { public let jobResources: MonitoringResources? 
/// The networking configuration for the data quality monitoring job. public let networkConfig: MonitoringNetworkConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? @@ -15625,7 +15664,7 @@ extension SageMaker { } public struct DescribeDomainResponse: AWSDecodableShape { - /// Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets + /// Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets public let appNetworkAccessType: AppNetworkAccessType? /// The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. public let appSecurityGroupManagement: AppSecurityGroupManagement? @@ -15657,7 +15696,7 @@ extension SageMaker { public let lastModifiedTime: Date? /// The ID of the security group that authorizes traffic between the RSessionGateway apps and the RStudioServerPro app. public let securityGroupIdForDomainBoundary: String? - /// The ARN of the application managed by SageMaker in IAM Identity Center. This value is only returned for domains created after October 1, 2023. + /// The ARN of the application managed by SageMaker AI in IAM Identity Center. This value is only returned for domains created after October 1, 2023. public let singleSignOnApplicationArn: String? /// The IAM Identity Center managed application instance ID. public let singleSignOnManagedApplicationInstanceId: String? @@ -16776,7 +16815,7 @@ extension SageMaker { public let imageStatus: ImageStatus? /// When the image was last modified. public let lastModifiedTime: Date? - /// The ARN of the IAM role that enables Amazon SageMaker to perform tasks on your behalf. + /// The ARN of the IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. public let roleArn: String? @inlinable @@ -16854,7 +16893,7 @@ extension SageMaker { public let imageVersionArn: String? /// The status of the version. public let imageVersionStatus: ImageVersionStatus? - /// Indicates SageMaker job type compatibility. TRAINING: The image version is compatible with SageMaker training jobs. INFERENCE: The image version is compatible with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels. + /// Indicates SageMaker AI job type compatibility. TRAINING: The image version is compatible with SageMaker AI training jobs. INFERENCE: The image version is compatible with SageMaker AI inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels. public let jobType: JobType? /// When the version was last modified. public let lastModifiedTime: Date? @@ -18007,7 +18046,7 @@ extension SageMaker { public let modelQualityJobOutputConfig: MonitoringOutputConfig? 
/// Networking options for a model quality job. public let networkConfig: MonitoringNetworkConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? @@ -18186,13 +18225,13 @@ extension SageMaker { public struct DescribeNotebookInstanceOutput: AWSDecodableShape { /// This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types associated with this notebook instance. public let acceleratorTypes: [NotebookInstanceAcceleratorType]? - /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let additionalCodeRepositories: [String]? /// A timestamp. Use this parameter to return the time when the notebook instance was created public let creationTime: Date? - /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let defaultCodeRepository: String? - /// Describes whether SageMaker provides internet access to the notebook instance. If this value is set to Disabled, the notebook instance does not have internet access, and cannot connect to SageMaker training and endpoint services. For more information, see Notebook Instances Are Internet-Enabled by Default. + /// Describes whether SageMaker AI provides internet access to the notebook instance. If this value is set to Disabled, the notebook instance does not have internet access, and cannot connect to SageMaker AI training and endpoint services. For more information, see Notebook Instances Are Internet-Enabled by Default. public let directInternetAccess: DirectInternetAccess? 
/// If status is Failed, the reason it failed. public let failureReason: String? @@ -18200,17 +18239,17 @@ extension SageMaker { public let instanceMetadataServiceConfiguration: InstanceMetadataServiceConfiguration? /// The type of ML compute instance running on the notebook instance. public let instanceType: InstanceType? - /// The Amazon Web Services KMS key ID SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance. + /// The Amazon Web Services KMS key ID SageMaker AI uses to encrypt data when storing it on the ML storage volume attached to the instance. public let kmsKeyId: String? /// A timestamp. Use this parameter to retrieve the time when the notebook instance was last modified. public let lastModifiedTime: Date? - /// The network interface IDs that SageMaker created at the time of creating the instance. + /// The network interface IDs that SageMaker AI created at the time of creating the instance. public let networkInterfaceId: String? /// The Amazon Resource Name (ARN) of the notebook instance. public let notebookInstanceArn: String? /// Returns the name of a notebook instance lifecycle configuration. For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance public let notebookInstanceLifecycleConfigName: String? - /// The name of the SageMaker notebook instance. + /// The name of the SageMaker AI notebook instance. public let notebookInstanceName: String? /// The status of the notebook instance. public let notebookInstanceStatus: NotebookInstanceStatus? @@ -18933,7 +18972,7 @@ extension SageMaker { } public struct DescribeStudioLifecycleConfigRequest: AWSEncodableShape { - /// The name of the Amazon SageMaker Studio Lifecycle Configuration to describe. + /// The name of the Amazon SageMaker AI Studio Lifecycle Configuration to describe. public let studioLifecycleConfigName: String? @inlinable @@ -18952,17 +18991,17 @@ extension SageMaker { } public struct DescribeStudioLifecycleConfigResponse: AWSDecodableShape { - /// The creation time of the Amazon SageMaker Studio Lifecycle Configuration. + /// The creation time of the Amazon SageMaker AI Studio Lifecycle Configuration. public let creationTime: Date? - /// This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle Configurations are immutable. + /// This value is equivalent to CreationTime because Amazon SageMaker AI Studio Lifecycle Configurations are immutable. public let lastModifiedTime: Date? /// The App type that the Lifecycle Configuration is attached to. public let studioLifecycleConfigAppType: StudioLifecycleConfigAppType? /// The ARN of the Lifecycle Configuration to describe. public let studioLifecycleConfigArn: String? - /// The content of your Amazon SageMaker Studio Lifecycle Configuration script. + /// The content of your Amazon SageMaker AI Studio Lifecycle Configuration script. public let studioLifecycleConfigContent: String? - /// The name of the Amazon SageMaker Studio Lifecycle Configuration that is described. + /// The name of the Amazon SageMaker AI Studio Lifecycle Configuration that is described. public let studioLifecycleConfigName: String? @inlinable @@ -20120,7 +20159,7 @@ extension SageMaker { public let amazonQSettings: AmazonQSettings? /// A collection of settings that configure the domain's Docker interaction. public let dockerSettings: DockerSettings? - /// The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key.
+ /// The configuration for attaching a SageMaker AI user profile name to the execution role as a sts:SourceIdentity key. public let executionRoleIdentityConfig: ExecutionRoleIdentityConfig? /// A collection of settings that configure the RStudioServerPro Domain-level app. public let rStudioServerProDomainSettings: RStudioServerProDomainSettings? @@ -20161,7 +20200,7 @@ extension SageMaker { public let amazonQSettings: AmazonQSettings? /// A collection of settings that configure the domain's Docker interaction. public let dockerSettings: DockerSettings? - /// The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key. This configuration can only be modified if there are no apps in the InService or Pending state. + /// The configuration for attaching a SageMaker AI user profile name to the execution role as a sts:SourceIdentity key. This configuration can only be modified if there are no apps in the InService or Pending state. public let executionRoleIdentityConfig: ExecutionRoleIdentityConfig? /// A collection of RStudioServerPro Domain-level app settings to update. A single RStudioServerPro application is created for a domain. public let rStudioServerProDomainSettingsForUpdate: RStudioServerProDomainSettingsForUpdate? @@ -20380,7 +20419,7 @@ extension SageMaker { public struct EFSFileSystemConfig: AWSEncodableShape & AWSDecodableShape { /// The ID of your Amazon EFS file system. public let fileSystemId: String? - /// The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below. + /// The path to the file system directory that is accessible in Amazon SageMaker AI Studio. Permitted users can access only this directory and below. public let fileSystemPath: String? @inlinable @@ -23758,7 +23797,7 @@ extension SageMaker { public let computeResourceRequirements: InferenceComponentComputeResourceRequirements? /// Defines a container that provides the runtime environment for a model that you deploy with an inference component. public let container: InferenceComponentContainerSpecification? - /// The name of an existing SageMaker model object in your account that you want to deploy with the inference component. + /// The name of an existing SageMaker AI model object in your account that you want to deploy with the inference component. public let modelName: String? /// Settings that take effect while the model container starts up. public let startupParameters: InferenceComponentStartupParameters? @@ -23798,7 +23837,7 @@ extension SageMaker { public let computeResourceRequirements: InferenceComponentComputeResourceRequirements? /// Details about the container that provides the runtime environment for the model that is deployed with the inference component. public let container: InferenceComponentContainerSpecificationSummary? - /// The name of the SageMaker model object that is deployed with the inference component. + /// The name of the SageMaker AI model object that is deployed with the inference component. public let modelName: String? /// Settings that take effect while the model container starts up. public let startupParameters: InferenceComponentStartupParameters? @@ -24443,9 +24482,9 @@ extension SageMaker { } public struct JupyterServerAppSettings: AWSEncodableShape & AWSDecodableShape { - /// A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. 
+ /// A list of Git repositories that SageMaker AI automatically displays to users for cloning in the JupyterServer application. public let codeRepositories: [CodeRepository]? - /// The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the LifecycleConfigArns parameter, then this parameter is also required. + /// The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the JupyterServer app. If you use the LifecycleConfigArns parameter, then this parameter is also required. public let defaultResourceSpec: ResourceSpec? /// The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the DefaultResourceSpec parameter is also required. To remove a Lifecycle Config, you must set LifecycleConfigArns to an empty list. public let lifecycleConfigArns: [String]? @@ -24491,9 +24530,9 @@ extension SageMaker { } public struct KernelGatewayAppSettings: AWSEncodableShape & AWSDecodableShape { - /// A list of custom SageMaker images that are configured to run as a KernelGateway app. + /// A list of custom SageMaker AI images that are configured to run as a KernelGateway app. public let customImages: [CustomImage]? - /// The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app. The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the CLI or CloudFormation and the instance type parameter value is not passed. + /// The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app. The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the CLI or CloudFormation and the instance type parameter value is not passed. public let defaultResourceSpec: ResourceSpec? /// The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain. To remove a Lifecycle Config, you must set LifecycleConfigArns to an empty list. public let lifecycleConfigArns: [String]? @@ -24525,7 +24564,7 @@ extension SageMaker { } public struct KernelGatewayImageConfig: AWSEncodableShape & AWSDecodableShape { - /// The Amazon Elastic File System storage configuration for a SageMaker image. + /// The Amazon Elastic File System storage configuration for a SageMaker AI image. public let fileSystemConfig: FileSystemConfig? /// The specification of the Jupyter kernels in the image. public let kernelSpecs: [KernelSpec]? @@ -25196,7 +25235,7 @@ extension SageMaker { public struct ListAliasesResponse: AWSDecodableShape { /// A token for getting the next set of aliases, if more aliases exist. public let nextToken: String? - /// A list of SageMaker image version aliases. + /// A list of SageMaker AI image version aliases. public let sageMakerImageVersionAliases: [String]? @inlinable @@ -26009,7 +26048,7 @@ extension SageMaker { public struct ListCompilationJobsResponse: AWSDecodableShape { /// An array of CompilationJobSummary objects, each describing a model compilation job. public let compilationJobSummaries: [CompilationJobSummary]? - /// If the response is truncated, Amazon SageMaker returns this NextToken. To retrieve the next set of model compilation jobs, use this token in the next request.
+ /// If the response is truncated, Amazon SageMaker AI returns this NextToken. To retrieve the next set of model compilation jobs, use this token in the next request. public let nextToken: String? @inlinable @@ -28640,7 +28679,7 @@ extension SageMaker { public struct ListModelQualityJobDefinitionsResponse: AWSDecodableShape { /// A list of summaries of model quality monitoring job definitions. public let jobDefinitionSummaries: [MonitoringJobDefinitionSummary]? - /// If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of model quality monitoring job definitions, use it in the next request. + /// If the response is truncated, Amazon SageMaker AI returns this token. To retrieve the next set of model quality monitoring job definitions, use it in the next request. public let nextToken: String? @inlinable @@ -29100,7 +29139,7 @@ extension SageMaker { } public struct ListNotebookInstanceLifecycleConfigsOutput: AWSDecodableShape { - /// If the response is truncated, SageMaker returns this token. To get the next set of lifecycle configurations, use it in the next request. + /// If the response is truncated, SageMaker AI returns this token. To get the next set of lifecycle configurations, use it in the next request. public let nextToken: String? /// An array of NotebookInstanceLifecycleConfiguration objects, each listing a lifecycle configuration. public let notebookInstanceLifecycleConfigs: [NotebookInstanceLifecycleConfigSummary]? @@ -29196,7 +29235,7 @@ extension SageMaker { } public struct ListNotebookInstancesOutput: AWSDecodableShape { - /// If the response to the previous ListNotebookInstances request was truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use the token in the next request. + /// If the response to the previous ListNotebookInstances request was truncated, SageMaker AI returns this token. To retrieve the next set of notebook instances, use the token in the next request. public let nextToken: String? /// An array of NotebookInstanceSummary objects, one for each notebook instance. public let notebookInstances: [NotebookInstanceSummary]? @@ -32033,10 +32072,12 @@ extension SageMaker { public let framework: String? /// The framework version of the Model Package Container Image. public let frameworkVersion: String? - /// The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored. If you are using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker requirements. SageMaker supports both registry/repository[:tag] and registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker. + /// The Amazon Elastic Container Registry (Amazon ECR) path where inference code is stored. If you are using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker requirements. SageMaker supports both registry/repository[:tag] and registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker. public let image: String? /// An MD5 hash of the training algorithm that identifies the Docker image used for training. public let imageDigest: String? + /// The ETag associated with Model Data URL. + public let modelDataETag: String? /// Specifies the location of ML model data to deploy during endpoint creation. public let modelDataSource: ModelDataSource? 
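Several of the list responses above share the same truncation contract: a non-nil nextToken means more pages remain. A minimal sketch of draining one of them by hand, assuming a configured SageMaker client and that the generated request initializer accepts nextToken as shown:

import SotoSageMaker

func allCompilationJobs(_ sageMaker: SageMaker) async throws -> [SageMaker.CompilationJobSummary] {
    var jobs: [SageMaker.CompilationJobSummary] = []
    var nextToken: String?
    repeat {
        // A truncated response carries NextToken; pass it back to fetch the next page.
        let page = try await sageMaker.listCompilationJobs(.init(nextToken: nextToken))
        jobs += page.compilationJobSummaries ?? []
        nextToken = page.nextToken
    } while nextToken != nil
    return jobs
}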
/// The Amazon S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). The model artifacts must be in an S3 bucket that is in the same region as the model package. @@ -32049,7 +32090,7 @@ extension SageMaker { public let productId: String? @inlinable - public init(additionalS3DataSource: AdditionalS3DataSource? = nil, containerHostname: String? = nil, environment: [String: String]? = nil, framework: String? = nil, frameworkVersion: String? = nil, image: String? = nil, imageDigest: String? = nil, modelDataSource: ModelDataSource? = nil, modelDataUrl: String? = nil, modelInput: ModelInput? = nil, nearestModelName: String? = nil, productId: String? = nil) { + public init(additionalS3DataSource: AdditionalS3DataSource? = nil, containerHostname: String? = nil, environment: [String: String]? = nil, framework: String? = nil, frameworkVersion: String? = nil, image: String? = nil, imageDigest: String? = nil, modelDataETag: String? = nil, modelDataSource: ModelDataSource? = nil, modelDataUrl: String? = nil, modelInput: ModelInput? = nil, nearestModelName: String? = nil, productId: String? = nil) { self.additionalS3DataSource = additionalS3DataSource self.containerHostname = containerHostname self.environment = environment @@ -32057,6 +32098,7 @@ extension SageMaker { self.frameworkVersion = frameworkVersion self.image = image self.imageDigest = imageDigest + self.modelDataETag = modelDataETag self.modelDataSource = modelDataSource self.modelDataUrl = modelDataUrl self.modelInput = modelInput @@ -32098,6 +32140,7 @@ extension SageMaker { case frameworkVersion = "FrameworkVersion" case image = "Image" case imageDigest = "ImageDigest" + case modelDataETag = "ModelDataETag" case modelDataSource = "ModelDataSource" case modelDataUrl = "ModelDataUrl" case modelInput = "ModelInput" @@ -32828,7 +32871,7 @@ extension SageMaker { public let instanceCount: Int? /// The ML compute instance type for the processing job. public let instanceType: ProcessingInstanceType? - /// The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job. + /// The Key Management Service (KMS) key that Amazon SageMaker AI uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job. public let volumeKmsKeyId: String? /// The size of the ML storage volume, in gigabytes, that you want to provision. You must specify sufficient ML storage for your scenario. public let volumeSizeInGB: Int? @@ -33012,7 +33055,7 @@ extension SageMaker { public let environment: [String: String]? /// Configures the monitoring job to run a specified Docker container image. public let monitoringAppSpecification: MonitoringAppSpecification? - /// The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker Endpoint. + /// The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker AI Endpoint. public let monitoringInputs: [MonitoringInput]? /// The array of outputs from the monitoring job to be uploaded to Amazon S3. public let monitoringOutputConfig: MonitoringOutputConfig? /// Identifies the resources to deploy for a monitoring job. public let monitoringResources: MonitoringResources? /// Specifies networking options for a monitoring job. public let networkConfig: NetworkConfig?
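As a sketch of where the new modelDataETag parameter lands in the initializer above (the image URI, ETag value, and S3 path are hypothetical placeholders):

import SotoSageMaker

let container = SageMaker.ModelPackageContainerDefinition(
    image: "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-inference:latest", // hypothetical ECR path
    modelDataETag: "\"9b2cf535f27731c974343645a3985328\"",                      // hypothetical ETag of the artifact
    modelDataUrl: "s3://my-model-bucket/output/model.tar.gz"                   // hypothetical artifact location
)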
- /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? /// Specifies a time limit for how long the monitoring job is allowed to run. public let stoppingCondition: MonitoringStoppingCondition? @@ -33159,7 +33202,7 @@ extension SageMaker { } public struct MonitoringOutputConfig: AWSEncodableShape & AWSDecodableShape { - /// The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. + /// The Key Management Service (KMS) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. public let kmsKeyId: String? /// Monitoring outputs for monitoring jobs. This is where the output of the periodic monitoring jobs is uploaded. public let monitoringOutputs: [MonitoringOutput]? @@ -33209,11 +33252,11 @@ extension SageMaker { } public struct MonitoringS3Output: AWSEncodableShape & AWSDecodableShape { - /// The local path to the Amazon S3 storage location where Amazon SageMaker saves the results of a monitoring job. LocalPath is an absolute path for the output data. + /// The local path to the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job. LocalPath is an absolute path for the output data. public let localPath: String? /// Whether to upload the results of the monitoring job continuously or after the job completes. public let s3UploadMode: ProcessingS3UploadMode? - /// A URI that identifies the Amazon S3 storage location where Amazon SageMaker saves the results of a monitoring job. + /// A URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job. public let s3Uri: String? @inlinable @@ -33551,11 +33594,11 @@ extension SageMaker { } public struct NotebookInstanceSummary: AWSDecodableShape { - /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let additionalCodeRepositories: [String]? /// A timestamp that shows when the notebook instance was created. public let creationTime: Date? - /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. 
For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let defaultCodeRepository: String? /// The type of ML compute instance that the notebook instance is running on. public let instanceType: InstanceType? @@ -34083,9 +34126,9 @@ extension SageMaker { public struct OutputConfig: AWSEncodableShape & AWSDecodableShape { /// Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions. DTYPE: Specifies the data type for the input. When compiling for ml_* (except for ml_inf) instances using PyTorch framework, provide the data type (dtype) of the model's input. "float32" is used if "DTYPE" is not specified. Options for data type are: float32: Use either "float" or "float32". int64: Use either "int64" or "long". For example, {"dtype" : "float32"}. CPU: Compilation for CPU supports the following compiler options. mcpu: CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'} mattr: CPU flags. For example, {'mattr': ['+neon', '+vfpv4']} ARM: Details of ARM CPU compilations. NEON: NEON is an implementation of the Advanced SIMD extension used in ARMv7 processors. For example, add {'mattr': ['+neon']} to the compiler options if compiling for ARM 32-bit platform with the NEON support. NVIDIA: Compilation for NVIDIA GPU supports the following compiler options. gpu_code: Specifies the targeted architecture. trt-ver: Specifies the TensorRT versions in x.y.z. format. cuda-ver: Specifies the CUDA version in x.y format. For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'} ANDROID: Compilation for the Android OS supports the following compiler options: ANDROID_PLATFORM: Specifies the Android API levels. Available levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}. mattr: Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit platform with NEON support. INFERENTIA: Compilation for target ml_inf1 uses compiler options passed in as a JSON string. For example, "CompilerOptions": "\"--verbose 1 --num-neuroncores 2 -O2\"". For information about supported compiler options, see Neuron Compiler CLI Reference Guide. CoreML: Compilation for the CoreML OutputConfig TargetDevice supports the following compiler options: class_labels: Specifies the classification labels file name inside input tar.gz file. For example, {"class_labels": "imagenet_labels_1000.txt"}. Labels inside the txt file should be separated by newlines. public let compilerOptions: String? - /// The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. 
For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide. The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: alias/ExampleAlias Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias + /// The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker AI uses to encrypt your output models with Amazon S3 server-side encryption after the compilation job. If you don't provide a KMS key ID, Amazon SageMaker AI uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide. The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: alias/ExampleAlias Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias public let kmsKeyId: String? - /// Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix. + /// Identifies the S3 bucket where you want Amazon SageMaker AI to store the model artifacts. For example, s3://bucket-name/key-name-prefix. public let s3OutputLocation: String? /// Identifies the target device or the machine learning instance that you want to run your model on after the compilation has completed. Alternatively, you can specify OS, architecture, and accelerator using TargetPlatform fields. It can be used instead of TargetPlatform. Currently ml_trn1 is available only in US East (N. Virginia) Region, and ml_inf2 is available only in US East (Ohio) Region. public let targetDevice: TargetDevice? @@ -36206,7 +36249,7 @@ extension SageMaker { } public struct RSessionAppSettings: AWSEncodableShape & AWSDecodableShape { - /// A list of custom SageMaker images that are configured to run as a RSession app. + /// A list of custom SageMaker AI images that are configured to run as an RSession app. public let customImages: [CustomImage]? public let defaultResourceSpec: ResourceSpec? @@ -37233,7 +37276,7 @@ extension SageMaker { public let instanceType: AppInstanceType? /// The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. public let lifecycleConfigArn: String? - /// The ARN of the SageMaker image that the image version belongs to. + /// The ARN of the SageMaker AI image that the image version belongs to. public let sageMakerImageArn: String? /// The SageMakerImageVersionAlias of the image to launch with. This value is in SemVer 2.0.0 versioning format. public let sageMakerImageVersionAlias: String? @@ -37432,8 +37475,12 @@ extension SageMaker { public struct S3ModelDataSource: AWSEncodableShape & AWSDecodableShape { /// Specifies how the ML model data is prepared. If you choose Gzip and choose S3Object as the value of S3DataType, S3Uri identifies an object that is a gzip-compressed TAR archive. SageMaker will attempt to decompress and untar the object during model deployment. If you choose None and choose S3Object as the value of S3DataType, S3Uri identifies an object that represents an uncompressed ML model to deploy. If you choose None and choose S3Prefix as the value of S3DataType, S3Uri identifies a key name prefix, under which all objects represent the uncompressed ML model to deploy.
If you choose None, then SageMaker will follow the rules below when creating model data files under the /opt/ml/model directory for use by your inference code: If you choose S3Object as the value of S3DataType, then SageMaker will split the key of the S3 object referenced by S3Uri by slash (/), and use the last part as the filename of the file holding the content of the S3 object. If you choose S3Prefix as the value of S3DataType, then for each S3 object under the key name prefix referenced by S3Uri, SageMaker will trim its key by the prefix, and use the remainder as the path (relative to /opt/ml/model) of the file holding the content of the S3 object. SageMaker will split the remainder by slash (/), using intermediate parts as directory names and the last part as the filename of the file holding the content of the S3 object. Do not use any of the following as file names or directory names: An empty or blank string A string which contains null bytes A string longer than 255 bytes A single dot (.) A double dot (..) Ambiguous file names will result in model deployment failure. For example, if your uncompressed ML model consists of two S3 objects s3://mybucket/model/weights and s3://mybucket/model/weights/part1 and you specify s3://mybucket/model/ as the value of S3Uri and S3Prefix as the value of S3DataType, then it will result in a name clash between /opt/ml/model/weights (a regular file) and /opt/ml/model/weights/ (a directory). Do not organize the model artifacts in the S3 console using folders. When you create a folder in the S3 console, S3 creates a 0-byte object with a key set to the folder name you provide. The key of the 0-byte object ends with a slash (/) which violates SageMaker restrictions on model artifact file names, leading to model deployment failure. public let compressionType: ModelCompressionType? + /// The ETag associated with S3 URI. + public let eTag: String? /// Configuration information for hub access. public let hubAccessConfig: InferenceHubAccessConfig? + /// The ETag associated with Manifest S3 URI. + public let manifestEtag: String? /// The Amazon S3 URI of the manifest file. The manifest file is a CSV file that stores the artifact locations. public let manifestS3Uri: String? /// Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the ModelAccessConfig. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model. public let modelAccessConfig: ModelAccessConfig? @@ -37444,9 +37491,11 @@ public let s3Uri: String? @inlinable - public init(compressionType: ModelCompressionType? = nil, hubAccessConfig: InferenceHubAccessConfig? = nil, manifestS3Uri: String? = nil, modelAccessConfig: ModelAccessConfig? = nil, s3DataType: S3ModelDataType? = nil, s3Uri: String? = nil) { + public init(compressionType: ModelCompressionType? = nil, eTag: String? = nil, hubAccessConfig: InferenceHubAccessConfig? = nil, manifestEtag: String? = nil, manifestS3Uri: String? = nil, modelAccessConfig: ModelAccessConfig? = nil, s3DataType: S3ModelDataType? = nil, s3Uri: String?
= nil) { self.compressionType = compressionType + self.eTag = eTag self.hubAccessConfig = hubAccessConfig + self.manifestEtag = manifestEtag self.manifestS3Uri = manifestS3Uri self.modelAccessConfig = modelAccessConfig self.s3DataType = s3DataType @@ -37463,7 +37512,9 @@ extension SageMaker { private enum CodingKeys: String, CodingKey { case compressionType = "CompressionType" + case eTag = "ETag" case hubAccessConfig = "HubAccessConfig" + case manifestEtag = "ManifestEtag" case manifestS3Uri = "ManifestS3Uri" case modelAccessConfig = "ModelAccessConfig" case s3DataType = "S3DataType" @@ -37557,7 +37608,7 @@ extension SageMaker { public let dataAnalysisEndTime: String? /// Sets the start time for a monitoring job window. Express this time as an offset to the times that you schedule your monitoring jobs to run. You schedule monitoring jobs with the ScheduleExpression parameter. Specify this offset in ISO 8601 duration format. For example, if you want to monitor the five hours of data in your dataset that precede the start of each monitoring job, you would specify: "-PT5H". The start time that you specify must not precede the end time that you specify by more than 24 hours. You specify the end time with the DataAnalysisEndTime parameter. If you set ScheduleExpression to NOW, this parameter is required. public let dataAnalysisStartTime: String? - /// A cron expression that describes details about the monitoring schedule. The supported cron expressions are: If you want to set the job to start every hour, use the following: Hourly: cron(0 * ? * * *) If you want to start the job daily: cron(0 [00-23] ? * * *) If you want to run the job one time, immediately, use the following keyword: NOW For example, the following are valid cron expressions: Daily at noon UTC: cron(0 12 ? * * *) Daily at midnight UTC: cron(0 0 ? * * *) To support running every 6, 12 hours, the following are also supported: cron(0 [00-23]/[01-24] ? * * *) For example, the following are valid cron expressions: Every 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *) Every two hours starting at midnight: cron(0 0/2 ? * * *) Even though the cron expression is set to start at 5PM UTC, note that there could be a delay of 0-20 minutes from the actual requested time to run the execution. We recommend that if you would like a daily schedule, you do not provide this parameter. Amazon SageMaker will pick a time for running every day. You can also specify the keyword NOW to run the monitoring job immediately, one time, without recurring. + /// A cron expression that describes details about the monitoring schedule. The supported cron expressions are: If you want to set the job to start every hour, use the following: Hourly: cron(0 * ? * * *) If you want to start the job daily: cron(0 [00-23] ? * * *) If you want to run the job one time, immediately, use the following keyword: NOW For example, the following are valid cron expressions: Daily at noon UTC: cron(0 12 ? * * *) Daily at midnight UTC: cron(0 0 ? * * *) To support running every 6, 12 hours, the following are also supported: cron(0 [00-23]/[01-24] ? * * *) For example, the following are valid cron expressions: Every 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *) Every two hours starting at midnight: cron(0 0/2 ? * * *) Even though the cron expression is set to start at 5PM UTC, note that there could be a delay of 0-20 minutes from the actual requested time to run the execution. We recommend that if you would like a daily schedule, you do not provide this parameter. 
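Pulling the new ETag fields together, a hedged sketch of an uncompressed S3Prefix model source built with the initializer above (bucket, prefix, and ETag values are hypothetical):

import SotoSageMaker

let modelData = SageMaker.S3ModelDataSource(
    // Fully qualify the case: a bare .none would resolve to Optional.none for this optional parameter.
    compressionType: SageMaker.ModelCompressionType.none,
    eTag: "\"9b2cf535f27731c974343645a3985328\"",              // hypothetical ETag of the S3 object
    manifestEtag: "\"0f343b0931126a20f133d67c2b018a3b\"",      // hypothetical ETag of the manifest
    manifestS3Uri: "s3://my-model-bucket/manifests/model.csv", // hypothetical manifest location
    s3DataType: .s3Prefix,
    s3Uri: "s3://my-model-bucket/uncompressed-model/"          // hypothetical key name prefix
)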
Amazon SageMaker AI will pick a time for running every day. You can also specify the keyword NOW to run the monitoring job immediately, one time, without recurring. public let scheduleExpression: String? @inlinable @@ -38220,14 +38271,17 @@ extension SageMaker { public struct SourceAlgorithm: AWSEncodableShape & AWSDecodableShape { /// The name of an algorithm that was used to create the model package. The algorithm must be either an algorithm resource in your SageMaker account or an algorithm in Amazon Web Services Marketplace that you are subscribed to. public let algorithmName: String? + /// The ETag associated with Model Data URL. + public let modelDataETag: String? /// Specifies the location of ML model data to deploy during endpoint creation. public let modelDataSource: ModelDataSource? /// The Amazon S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). The model artifacts must be in an S3 bucket that is in the same Amazon Web Services region as the algorithm. public let modelDataUrl: String? @inlinable - public init(algorithmName: String? = nil, modelDataSource: ModelDataSource? = nil, modelDataUrl: String? = nil) { + public init(algorithmName: String? = nil, modelDataETag: String? = nil, modelDataSource: ModelDataSource? = nil, modelDataUrl: String? = nil) { self.algorithmName = algorithmName + self.modelDataETag = modelDataETag self.modelDataSource = modelDataSource self.modelDataUrl = modelDataUrl } @@ -38243,6 +38297,7 @@ extension SageMaker { private enum CodingKeys: String, CodingKey { case algorithmName = "AlgorithmName" + case modelDataETag = "ModelDataETag" case modelDataSource = "ModelDataSource" case modelDataUrl = "ModelDataUrl" } @@ -38428,11 +38483,11 @@ extension SageMaker { } public struct SpaceSettings: AWSEncodableShape & AWSDecodableShape { - /// The type of app created within the space. + /// The type of app created within the space. If using the UpdateSpace API, you can't change the app type of your space by specifying a different value for this field. public let appType: AppType? /// The Code Editor application settings. public let codeEditorAppSettings: SpaceCodeEditorAppSettings? - /// A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio. + /// A file system, created by you, that you assign to a space for an Amazon SageMaker AI Domain. Permitted users can access this file system in Amazon SageMaker AI Studio. public let customFileSystems: [CustomFileSystem]? /// The settings for the JupyterLab application. public let jupyterLabAppSettings: SpaceJupyterLabAppSettings? @@ -39205,15 +39260,15 @@ extension SageMaker { } public struct StudioLifecycleConfigDetails: AWSDecodableShape { - /// The creation time of the Amazon SageMaker Studio Lifecycle Configuration. + /// The creation time of the Amazon SageMaker AI Studio Lifecycle Configuration. public let creationTime: Date? - /// This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle Configurations are immutable. + /// This value is equivalent to CreationTime because Amazon SageMaker AI Studio Lifecycle Configurations are immutable. public let lastModifiedTime: Date? /// The App type to which the Lifecycle Configuration is attached. public let studioLifecycleConfigAppType: StudioLifecycleConfigAppType? /// The Amazon Resource Name (ARN) of the Lifecycle Configuration. 
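Returning to the ScheduleConfig cron forms described above, a minimal sketch of a daily schedule and a one-shot NOW run (assuming the memberwise initializer mirrors the properties shown):

import SotoSageMaker

// Daily at noon UTC; the actual start may lag the requested time by 0-20 minutes.
let daily = SageMaker.ScheduleConfig(scheduleExpression: "cron(0 12 ? * * *)")
// One-time immediate run over the preceding five hours of data;
// DataAnalysisStartTime is required when the expression is NOW.
let oneShot = SageMaker.ScheduleConfig(
    dataAnalysisEndTime: "-PT0H",
    dataAnalysisStartTime: "-PT5H",
    scheduleExpression: "NOW"
)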
public let studioLifecycleConfigArn: String? - /// The name of the Amazon SageMaker Studio Lifecycle Configuration. + /// The name of the Amazon SageMaker AI Studio Lifecycle Configuration. public let studioLifecycleConfigName: String? @inlinable @@ -39448,7 +39503,7 @@ extension SageMaker { } public struct TensorBoardAppSettings: AWSEncodableShape & AWSDecodableShape { - /// The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. + /// The default instance type and the Amazon Resource Name (ARN) of the SageMaker AI image created on the instance. public let defaultResourceSpec: ResourceSpec? @inlinable @@ -41907,7 +41962,7 @@ extension SageMaker { } public struct UpdateDomainRequest: AWSEncodableShape { - /// Specifies the VPC used for non-EFS traffic. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access. VpcOnly - All Studio traffic is through the specified VPC and subnets. This configuration can only be modified if there are no apps in the InService, Pending, or Deleting state. The configuration cannot be updated if DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided as part of the same request. + /// Specifies the VPC used for non-EFS traffic. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access. VpcOnly - All Studio traffic is through the specified VPC and subnets. This configuration can only be modified if there are no apps in the InService, Pending, or Deleting state. The configuration cannot be updated if DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided as part of the same request. public let appNetworkAccessType: AppNetworkAccessType? /// The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. If setting up the domain for use with RStudio, this value must be set to Service. public let appSecurityGroupManagement: AppSecurityGroupManagement? @@ -42286,7 +42341,7 @@ extension SageMaker { public let displayName: String? /// The name of the image to update. public let imageName: String? - /// The new ARN for the IAM role that enables Amazon SageMaker to perform tasks on your behalf. + /// The new ARN for the IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. public let roleArn: String? @inlinable @@ -42353,7 +42408,7 @@ extension SageMaker { public let horovod: Bool? /// The name of the image. public let imageName: String? - /// Indicates SageMaker job type compatibility. TRAINING: The image version is compatible with SageMaker training jobs. INFERENCE: The image version is compatible with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels. + /// Indicates SageMaker AI job type compatibility. TRAINING: The image version is compatible with SageMaker AI training jobs. INFERENCE: The image version is compatible with SageMaker AI inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels. public let jobType: JobType? 
/// The machine learning framework vended in the image version. public let mlFramework: String? @@ -42888,9 +42943,9 @@ extension SageMaker { public struct UpdateNotebookInstanceInput: AWSEncodableShape { /// This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types to associate with this notebook instance. public let acceleratorTypes: [NotebookInstanceAcceleratorType]? - /// An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let additionalCodeRepositories: [String]? - /// The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let defaultCodeRepository: String? /// This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types to remove from this notebook instance. public let disassociateAcceleratorTypes: Bool? @@ -42908,11 +42963,11 @@ extension SageMaker { public let lifecycleConfigName: String? /// The name of the notebook instance to update. public let notebookInstanceName: String? - /// The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access the notebook instance. For more information, see SageMaker Roles. To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. + /// The Amazon Resource Name (ARN) of the IAM role that SageMaker AI can assume to access the notebook instance. For more information, see SageMaker AI Roles. To be able to pass this role to SageMaker AI, the caller of this API must have the iam:PassRole permission. public let roleArn: String? /// Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled. 
If you set this to Disabled, users don't have root access on the notebook instance, but lifecycle configuration scripts still run with root permissions. public let rootAccess: RootAccess? - /// The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. ML storage volumes are encrypted, so SageMaker can't determine the amount of available free space on the volume. Because of this, you can increase the volume size when you update a notebook instance, but you can't decrease the volume size. If you want to decrease the size of the ML storage volume in use, create a new notebook instance with the desired size. + /// The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. ML storage volumes are encrypted, so SageMaker AI can't determine the amount of available free space on the volume. Because of this, you can increase the volume size when you update a notebook instance, but you can't decrease the volume size. If you want to decrease the size of the ML storage volume in use, create a new notebook instance with the desired size. public let volumeSizeInGB: Int? @inlinable @@ -43730,7 +43785,7 @@ extension SageMaker { public let canvasAppSettings: CanvasAppSettings? /// The Code Editor application settings. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. public let codeEditorAppSettings: CodeEditorAppSettings? - /// The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. + /// The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker AI Studio. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. public let customFileSystemConfigs: [CustomFileSystemConfig]? /// Details about the POSIX identity that is used for file system operations. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. public let customPosixUserConfig: CustomPosixUserConfig? @@ -43748,9 +43803,9 @@ extension SageMaker { public let rSessionAppSettings: RSessionAppSettings? /// A collection of settings that configure user interaction with the RStudioServerPro app. public let rStudioServerProAppSettings: RStudioServerProAppSettings? - /// The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication. Optional when the CreateDomain.AppNetworkAccessType parameter is set to PublicInternetOnly. Required when the CreateDomain.AppNetworkAccessType parameter is set to VpcOnly, unless specified as part of the DefaultUserSettings for the domain. Amazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. + /// The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication. 
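Because the ML storage volume described above can only grow, an update such as this hedged sketch succeeds for an increase but never a decrease (the client and instance name are hypothetical):

import SotoSageMaker

let input = SageMaker.UpdateNotebookInstanceInput(
    notebookInstanceName: "my-notebook", // hypothetical instance name
    volumeSizeInGB: 50                   // growing (e.g. from 5 GB) is allowed; shrinking is not
)
_ = try await sageMaker.updateNotebookInstance(input)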
Optional when the CreateDomain.AppNetworkAccessType parameter is set to PublicInternetOnly. Required when the CreateDomain.AppNetworkAccessType parameter is set to VpcOnly, unless specified as part of the DefaultUserSettings for the domain. Amazon SageMaker AI adds a security group to allow NFS traffic from Amazon SageMaker AI Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. public let securityGroups: [String]? - /// Specifies options for sharing Amazon SageMaker Studio notebooks. + /// Specifies options for sharing Amazon SageMaker AI Studio notebooks. public let sharingSettings: SharingSettings? /// The storage settings for a space. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. public let spaceStorageSettings: DefaultSpaceStorageSettings? diff --git a/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift b/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift index 8d46795823..f3e19ac1c0 100644 --- a/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift +++ b/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift @@ -93,6 +93,7 @@ public struct SecretsManager: AWSService { "ap-southeast-3": "secretsmanager.ap-southeast-3.api.aws", "ap-southeast-4": "secretsmanager.ap-southeast-4.api.aws", "ap-southeast-5": "secretsmanager.ap-southeast-5.api.aws", + "ap-southeast-7": "secretsmanager.ap-southeast-7.api.aws", "ca-central-1": "secretsmanager.ca-central-1.api.aws", "ca-west-1": "secretsmanager.ca-west-1.api.aws", "cn-north-1": "secretsmanager.cn-north-1.api.amazonwebservices.com.cn", @@ -108,6 +109,7 @@ public struct SecretsManager: AWSService { "il-central-1": "secretsmanager.il-central-1.api.aws", "me-central-1": "secretsmanager.me-central-1.api.aws", "me-south-1": "secretsmanager.me-south-1.api.aws", + "mx-central-1": "secretsmanager.mx-central-1.api.aws", "sa-east-1": "secretsmanager.sa-east-1.api.aws", "us-east-1": "secretsmanager.us-east-1.api.aws", "us-east-2": "secretsmanager.us-east-2.api.aws", diff --git a/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift b/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift index 3ded5a417b..1451cedac0 100644 --- a/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift +++ b/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift @@ -1582,11 +1582,11 @@ public struct SecurityHub: AWSService { /// Returns history for a Security Hub finding in the last 90 days. The history includes changes made to any fields in the Amazon Web Services Security Finding Format (ASFF). /// /// Parameters: - /// - endTime: An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. 
In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps + /// - endTime: An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. /// - findingIdentifier: /// - maxResults: The maximum number of results to be returned. If you don’t provide it, Security Hub returns up to 100 results of finding history. /// - nextToken: A token for pagination purposes. Provide NULL as the initial value. In subsequent requests, provide the token included in the response to get up to an additional 100 results of finding history. If you don’t provide NextToken, Security Hub returns up to 100 results of finding history for each request. - /// - startTime: A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps + /// - startTime: A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. 
/// - logger: Logger use during operation @inlinable public func getFindingHistory( @@ -2611,7 +2611,7 @@ public struct SecurityHub: AWSService { /// Updates configuration options for Security Hub. /// /// Parameters: - /// - autoEnableControls: Whether to automatically enable new controls when they are added to standards that are enabled. By default, this is set to true, and new controls are enabled automatically. To not automatically enable new controls, set this to false. + /// - autoEnableControls: Whether to automatically enable new controls when they are added to standards that are enabled. By default, this is set to true, and new controls are enabled automatically. To not automatically enable new controls, set this to false. When you automatically enable new controls, you can interact with the controls in the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of DISABLED. It can take up to several days for Security Hub to process the control release and designate the control as ENABLED in your account. During the processing period, you can manually enable or disable a control, and Security Hub will maintain that designation regardless of whether you have AutoEnableControls set to true. /// - controlFindingGenerator: Updates whether the calling account has consolidated control findings turned on. If the value for this field is set to SECURITY_CONTROL, Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards. If the value for this field is set to STANDARD_CONTROL, Security Hub generates separate findings for a control check when the check applies to multiple enabled standards. For accounts that are part of an organization, this value can only be updated in the administrator account. /// - logger: Logger use during operation @inlinable @@ -2879,10 +2879,10 @@ extension SecurityHub { /// Return PaginatorSequence for operation ``getFindingHistory(_:logger:)``. /// /// - Parameters: - /// - endTime: An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps + /// - endTime: An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. 
If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. /// - findingIdentifier: /// - maxResults: The maximum number of results to be returned. If you don’t provide it, Security Hub returns up to 100 results of finding history. - /// - startTime: A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps + /// - startTime: A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. /// - logger: Logger used for logging @inlinable public func getFindingHistoryPaginator( diff --git a/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift b/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift index fc10d2ed21..e2ce661377 100644 --- a/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift +++ b/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift @@ -1089,9 +1089,7 @@ extension SecurityHub { public struct AutomationRulesConfig: AWSDecodableShape { /// One or more actions to update finding fields if a finding matches the defined criteria of the rule. public let actions: [AutomationRulesAction]? - /// A timestamp that indicates when the rule was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
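As a sketch of the getFindingHistoryPaginator window rules above, this bounds the request to the last 30 days, well inside the 90-day maximum; the finding identifier values are hypothetical and a configured SecurityHub client is assumed:

import Foundation
import SotoSecurityHub

let identifier = SecurityHub.AwsSecurityFindingIdentifier(
    id: "a1b2c3d4-example-finding-id",                                   // hypothetical
    productArn: "arn:aws:securityhub:us-east-1::product/aws/securityhub" // hypothetical
)
let history = securityHub.getFindingHistoryPaginator(
    endTime: Date(),
    findingIdentifier: identifier,
    startTime: Date().addingTimeInterval(-30 * 24 * 3600)
)
for try await page in history {
    // Each page holds up to 100 history records.
    print(page.records?.count ?? 0)
}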
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the rule was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. @OptionalCustomCoding public var createdAt: Date? /// The principal that created a rule. @@ -1110,9 +1108,7 @@ extension SecurityHub { public let ruleOrder: Int? /// Whether the rule is active after it is created. If this parameter is equal to ENABLED, Security Hub starts applying the rule to findings and finding updates after the rule is created. public let ruleStatus: RuleStatus? - /// A timestamp that indicates when the rule was most recently updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the rule was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. @OptionalCustomCoding public var updatedAt: Date? @@ -1223,31 +1219,23 @@ extension SecurityHub { public let complianceStatus: [StringFilter]? /// The likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0–100 basis using a ratio scale. A value of 0 means 0 percent confidence, and a value of 100 means 100 percent confidence. For example, a data exfiltration detection based on a statistical deviation of network traffic has low confidence because an actual exfiltration hasn't been verified. For more information, see Confidence in the Security Hub User Guide. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let confidence: [NumberFilter]? - /// A timestamp that indicates when this finding record was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) Array Members: Minimum number of 1 item. Maximum number of 20 items. + /// A timestamp that indicates when this finding record was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. 
Array Members: Minimum number of 1 item. Maximum number of 20 items. public let createdAt: [DateFilter]? /// The level of importance that is assigned to the resources that are associated with a finding. Criticality is scored on a 0–100 basis, using a ratio scale that supports only full integers. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. For more information, see Criticality in the Security Hub User Guide. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let criticality: [NumberFilter]? /// A finding's description. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let description: [StringFilter]? - /// A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) Array Members: Minimum number of 1 item. Maximum number of 20 items. + /// A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let firstObservedAt: [DateFilter]? /// The identifier for the solution-specific component that generated a finding. Array Members: Minimum number of 1 item. Maximum number of 100 items. public let generatorId: [StringFilter]? /// The product-specific identifier for a finding. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let id: [StringFilter]? - /// A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) Array Members: Minimum number of 1 item. Maximum number of 20 items. + /// A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let lastObservedAt: [DateFilter]? 
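The DateFilter arrays above accept either a fixed ISO 8601 window or a relative range; a brief sketch of both forms (values are hypothetical):

import SotoSecurityHub

// Relative window: findings created in the last 7 days.
let createdRecently = SecurityHub.DateFilter(dateRange: .init(unit: .days, value: 7))
// Fixed window: findings last observed inside an explicit ISO 8601 range.
let observedInJanuary = SecurityHub.DateFilter(
    end: "2024-01-31T23:59:59Z",
    start: "2024-01-01T00:00:00Z"
)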
@@ -1285,9 +1273,7 @@ extension SecurityHub {
public let title: [StringFilter]?
/// One or more finding types in the format of namespace/category/classifier that classify a finding. For a list of namespaces, classifiers, and categories, see Types taxonomy for ASFF in the Security Hub User Guide. Array Members: Minimum number of 1 item. Maximum number of 20 items.
public let type: [StringFilter]?
- /// A timestamp that indicates when the finding record was most recently updated. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) Array Members: Minimum number of 1 item. Maximum number of 20 items.
+ /// A timestamp that indicates when the finding record was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items.
public let updatedAt: [DateFilter]?
/// A list of user-defined name and value string pairs added to a finding. Array Members: Minimum number of 1 item. Maximum number of 20 items.
public let userDefinedFields: [MapFilter]?
@@ -1492,9 +1478,7 @@ extension SecurityHub {
}
public struct AutomationRulesMetadata: AWSDecodableShape {
- /// A timestamp that indicates when the rule was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// A timestamp that indicates when the rule was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
@OptionalCustomCoding
public var createdAt: Date?
/// The principal that created a rule.
@@ -1511,9 +1495,7 @@ extension SecurityHub {
public let ruleOrder: Int?
/// Whether the rule is active after it is created. If this parameter is equal to ENABLED, Security Hub starts applying the rule to findings and finding updates after the rule is created. To change the value of this parameter after creating a rule, use BatchUpdateAutomationRules .
public let ruleStatus: RuleStatus?
- /// A timestamp that indicates when the rule was most recently updated. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// A timestamp that indicates when the rule was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
@OptionalCustomCoding
public var updatedAt: Date?
@@ -1871,13 +1853,9 @@ extension SecurityHub {
public let callerType: String?
/// Provided if CallerType is domain. Provides information about the DNS domain that the API call originated from.
public let domainDetails: AwsApiCallActionDomainDetails?
- /// A timestamp that indicates when the API call was first observed. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// A timestamp that indicates when the API call was first observed. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let firstSeen: String?
- /// A timestamp that indicates when the API call was most recently observed. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// A timestamp that indicates when the API call was most recently observed. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let lastSeen: String?
/// Provided if CallerType is remoteip. Provides information about the remote IP address that the API call originated from.
public let remoteIpDetails: ActionRemoteIpDetails?
@@ -2087,9 +2065,7 @@ extension SecurityHub {
public let apiKeySource: String?
/// The list of binary media types supported by the REST API.
public let binaryMediaTypes: [String]?
- /// Indicates when the API was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the API was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createdDate: String?
/// A description of the REST API.
public let description: String?
@@ -2156,9 +2132,7 @@ extension SecurityHub {
public let canarySettings: AwsApiGatewayCanarySettings?
/// The identifier of the client certificate for the stage.
public let clientCertificateId: String?
- /// Indicates when the stage was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the stage was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createdDate: String?
/// The identifier of the deployment that the stage points to.
public let deploymentId: String?
@@ -2166,9 +2140,7 @@
public let description: String?
/// The version of the API documentation that is associated with the stage.
public let documentationVersion: String?
- /// Indicates when the stage was most recently updated. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the stage was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let lastUpdatedDate: String?
/// Defines the method settings for the stage.
public let methodSettings: [AwsApiGatewayMethodSettings]?
@@ -2252,9 +2224,7 @@ extension SecurityHub {
public let apiKeySelectionExpression: String?
/// A cross-origin resource sharing (CORS) configuration. Supported only for HTTP APIs.
public let corsConfiguration: AwsCorsConfiguration?
- /// Indicates when the API was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the API was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createdDate: String?
/// A description of the API.
public let description: String?
@@ -2351,9 +2321,7 @@ extension SecurityHub {
public let autoDeploy: Bool?
/// The identifier of a client certificate for a stage. Supported only for WebSocket API calls.
public let clientCertificateId: String?
- /// Indicates when the stage was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the stage was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createdDate: String?
/// Default route settings for the stage.
public let defaultRouteSettings: AwsApiGatewayV2RouteSettings?
@@ -2363,9 +2331,7 @@
public let description: String?
/// The status of the last deployment of a stage. Supported only if the stage has automatic deployment enabled.
public let lastDeploymentStatusMessage: String?
- /// Indicates when the stage was most recently updated. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the stage was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let lastUpdatedDate: String?
/// The route settings for the stage.
public let routeSettings: AwsApiGatewayV2RouteSettings?
@@ -2765,9 +2731,7 @@ extension SecurityHub {
public let availabilityZones: [AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails]?
/// Indicates whether capacity rebalancing is enabled.
public let capacityRebalance: Bool?
- /// Indicates when the auto scaling group was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the auto scaling group was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createdTime: String?
/// The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before it checks the health status of an EC2 instance that has come into service.
public let healthCheckGracePeriod: Int?
@@ -3069,9 +3033,7 @@ extension SecurityHub {
public let classicLinkVpcId: String?
/// The identifiers of one or more security groups for the VPC that is specified in ClassicLinkVPCId.
public let classicLinkVpcSecurityGroups: [String]?
- /// The creation date and time for the launch configuration. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// The creation date and time for the launch configuration. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createdTime: String?
/// Whether the launch configuration is optimized for Amazon EBS I/O.
public let ebsOptimized: Bool?
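A side note on the text being removed: it also encodes two validation rules (a time-secfrac of at most nine digits, and an offset bounded by +/-18:00) that after this change live only behind the "see Timestamps" reference. A self-contained sketch of that check (a hypothetical helper, not anything Soto or Security Hub ships):

```swift
import Foundation

/// Illustrative only: checks a string against the constraints the removed
/// comments describe: RFC 3339-style layout, at most 9 fractional-second
/// digits, and a zone offset no larger than +/-18:00.
func isValidSecurityHubTimestamp(_ s: String) -> Bool {
    let pattern = #"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:?\d{2})$"#
    guard s.range(of: pattern, options: .regularExpression) != nil else { return false }
    if s.hasSuffix("Z") { return true }
    // Pull the trailing offset, with or without the colon ("+17:59" or "-1759").
    guard let r = s.range(of: #"[+-]\d{2}:?\d{2}$"#, options: .regularExpression) else { return false }
    let digits = s[r].filter(\.isNumber)          // e.g. "1759"
    guard let hours = Int(digits.prefix(2)), let minutes = Int(digits.suffix(2)) else { return false }
    return minutes < 60 && hours * 60 + minutes <= 18 * 60
}

// isValidSecurityHubTimestamp("2024-01-04T15:25:10-1759")  -> true
// isValidSecurityHubTimestamp("2024-01-04T15:25:10+18:01") -> false (offset out of range)
```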
@@ -3667,9 +3629,7 @@ extension SecurityHub {
public struct AwsCertificateManagerCertificateDetails: AWSEncodableShape & AWSDecodableShape {
/// The ARN of the private certificate authority (CA) that will be used to issue the certificate.
public let certificateAuthorityArn: String?
- /// Indicates when the certificate was requested. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the certificate was requested. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createdAt: String?
/// The fully qualified domain name (FQDN), such as www.example.com, that is secured by the certificate.
public let domainName: String?
@@ -3679,15 +3639,11 @@ extension SecurityHub {
public let extendedKeyUsages: [AwsCertificateManagerCertificateExtendedKeyUsage]?
/// For a failed certificate request, the reason for the failure. Valid values: NO_AVAILABLE_CONTACTS | ADDITIONAL_VERIFICATION_REQUIRED | DOMAIN_NOT_ALLOWED | INVALID_PUBLIC_DOMAIN | DOMAIN_VALIDATION_DENIED | CAA_ERROR | PCA_LIMIT_EXCEEDED | PCA_INVALID_ARN | PCA_INVALID_STATE | PCA_REQUEST_FAILED | PCA_NAME_CONSTRAINTS_VALIDATION | PCA_RESOURCE_NOT_FOUND | PCA_INVALID_ARGS | PCA_INVALID_DURATION | PCA_ACCESS_DENIED | SLR_NOT_FOUND | OTHER
public let failureReason: String?
- /// Indicates when the certificate was imported. Provided if the certificate type is IMPORTED. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the certificate was imported. Provided if the certificate type is IMPORTED. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let importedAt: String?
/// The list of ARNs for the Amazon Web Services resources that use the certificate.
public let inUseBy: [String]?
- /// Indicates when the certificate was issued. Provided if the certificate type is AMAZON_ISSUED. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the certificate was issued. Provided if the certificate type is AMAZON_ISSUED. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let issuedAt: String?
/// The name of the certificate authority that issued and signed the certificate.
public let issuer: String?
@@ -3695,13 +3651,9 @@ extension SecurityHub {
public let keyAlgorithm: String?
/// A list of key usage X.509 v3 extension objects.
public let keyUsages: [AwsCertificateManagerCertificateKeyUsage]?
- /// The time after which the certificate becomes invalid. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// The time after which the certificate becomes invalid. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let notAfter: String?
- /// The time before which the certificate is not valid. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// The time before which the certificate is not valid. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let notBefore: String?
/// Provides a value that specifies whether to add the certificate to a transparency log.
public let options: AwsCertificateManagerCertificateOptions?
@@ -3923,9 +3875,7 @@ extension SecurityHub {
public let renewalStatus: String?
/// The reason that a renewal request was unsuccessful. This attribute is used only when RenewalStatus is FAILED. Valid values: NO_AVAILABLE_CONTACTS | ADDITIONAL_VERIFICATION_REQUIRED | DOMAIN_NOT_ALLOWED | INVALID_PUBLIC_DOMAIN | DOMAIN_VALIDATION_DENIED | CAA_ERROR | PCA_LIMIT_EXCEEDED | PCA_INVALID_ARN | PCA_INVALID_STATE | PCA_REQUEST_FAILED | PCA_NAME_CONSTRAINTS_VALIDATION | PCA_RESOURCE_NOT_FOUND | PCA_INVALID_ARGS | PCA_INVALID_DURATION | PCA_ACCESS_DENIED | SLR_NOT_FOUND | OTHER
public let renewalStatusReason: String?
- /// Indicates when the renewal summary was last updated. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the renewal summary was last updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let updatedAt: String?
@inlinable
@@ -4188,9 +4138,7 @@ extension SecurityHub {
public let domainName: String?
/// The entity tag is a hash of the object.
public let eTag: String?
- /// Indicates when that the distribution was last modified. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the distribution was last modified. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let lastModifiedTime: String?
/// A complex type that controls whether access logs are written for the distribution.
public let logging: AwsCloudFrontDistributionLogging?
@@ -5464,9 +5412,7 @@ extension SecurityHub {
public struct AwsDynamoDbTableBillingModeSummary: AWSEncodableShape & AWSDecodableShape {
/// The method used to charge for read and write throughput and to manage capacity.
public let billingMode: String?
- /// If the billing mode is PAY_PER_REQUEST, indicates when the billing mode was set to that value. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// If the billing mode is PAY_PER_REQUEST, indicates when the billing mode was set to that value. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let lastUpdateToPayPerRequestDateTime: String?
@inlinable
@@ -5491,9 +5437,7 @@ extension SecurityHub {
public let attributeDefinitions: [AwsDynamoDbTableAttributeDefinition]?
/// Information about the billing for read/write capacity on the table.
public let billingModeSummary: AwsDynamoDbTableBillingModeSummary?
- /// Indicates when the table was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the table was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let creationDateTime: String?
/// Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table.
public let deletionProtectionEnabled: Bool?
@@ -5749,13 +5693,9 @@ extension SecurityHub {
}
public struct AwsDynamoDbTableProvisionedThroughput: AWSEncodableShape & AWSDecodableShape {
- /// Indicates when the provisioned throughput was last decreased. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the provisioned throughput was last decreased. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let lastDecreaseDateTime: String?
- /// Indicates when the provisioned throughput was last increased. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the provisioned throughput was last increased. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let lastIncreaseDateTime: String?
/// The number of times during the current UTC calendar day that the provisioned throughput was decreased.
public let numberOfDecreasesToday: Int?
@@ -5868,9 +5808,7 @@ extension SecurityHub {
}
public struct AwsDynamoDbTableRestoreSummary: AWSEncodableShape & AWSDecodableShape {
- /// Indicates the point in time that the table was restored to. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates the point in time that the table was restored to. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let restoreDateTime: String?
/// Whether a restore is currently in progress.
public let restoreInProgress: Bool?
@@ -5902,9 +5840,7 @@ extension SecurityHub {
}
public struct AwsDynamoDbTableSseDescription: AWSEncodableShape & AWSDecodableShape {
- /// If the key is inaccessible, the date and time when DynamoDB detected that the key was inaccessible. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// If the key is inaccessible, the date and time when DynamoDB detected that the key was inaccessible. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let inaccessibleEncryptionDateTime: String?
/// The ARN of the KMS key that is used for the KMS encryption.
public let kmsMasterKeyArn: String?
@@ -6319,9 +6255,7 @@ extension SecurityHub {
public let ipV6Addresses: [String]?
/// The key name associated with the instance.
public let keyName: String?
- /// Indicates when the instance was launched. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the instance was launched. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let launchedAt: String?
/// Details about the metadata options for the Amazon EC2 instance.
public let metadataOptions: AwsEc2InstanceMetadataOptions?
@@ -7729,9 +7663,7 @@ extension SecurityHub {
public struct AwsEc2NetworkInterfaceAttachment: AWSEncodableShape & AWSDecodableShape {
/// The identifier of the network interface attachment
public let attachmentId: String?
- /// Indicates when the attachment initiated. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the attachment was initiated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let attachTime: String?
/// Indicates whether the network interface is deleted when the instance is terminated.
public let deleteOnTermination: Bool?
@@ -8328,9 +8260,7 @@ extension SecurityHub {
public struct AwsEc2VolumeDetails: AWSEncodableShape & AWSDecodableShape {
/// The volume attachments.
public let attachments: [AwsEc2VolumeAttachment]?
- /// Indicates when the volume was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the volume was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createTime: String?
/// The device name for the volume that is attached to the instance.
public let deviceName: String?
@@ -8857,9 +8787,7 @@ extension SecurityHub {
public let acceptedRouteCount: Int?
/// The ARN of the VPN tunnel endpoint certificate.
public let certificateArn: String?
- /// The date and time of the last change in status. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// The date and time of the last change in status. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let lastStatusChange: String?
/// The Internet-routable IP address of the virtual private gateway's outside interface.
public let outsideIpAddress: String?
@@ -8901,9 +8829,7 @@ extension SecurityHub {
public let architecture: String?
/// The sha256 digest of the image manifest.
public let imageDigest: String?
- /// The date and time when the image was pushed to the repository. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// The date and time when the image was pushed to the repository. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let imagePublishedAt: String?
/// The list of tags that are associated with the image.
public let imageTags: [String]?
@@ -11837,9 +11763,7 @@ extension SecurityHub {
public let canonicalHostedZoneName: String?
/// The ID of the Amazon Route 53 hosted zone for the load balancer.
public let canonicalHostedZoneNameID: String?
- /// Indicates when the load balancer was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the load balancer was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createdTime: String?
/// The DNS name of the load balancer.
public let dnsName: String?
@@ -12138,9 +12062,7 @@ extension SecurityHub {
public let availabilityZones: [AvailabilityZone]?
/// The ID of the Amazon Route 53 hosted zone associated with the load balancer.
public let canonicalHostedZoneId: String?
- /// Indicates when the load balancer was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the load balancer was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createdTime: String?
/// The public DNS name of the load balancer.
public let dnsName: String?
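The IAM shapes in the next hunks keep their creation timestamps as `String?`, so any age-based check needs a parse step first. A hypothetical helper (illustrative names only; any RFC 3339 parser, such as the sketch shown earlier, can stand in for `parse`):

```swift
import Foundation

// Illustrative only: computes the age of an access key from the String-typed
// createdAt field carried by shapes like AwsIamAccessKeyDetails.
func accessKeyAgeInDays(createdAt: String?, parse: (String) -> Date?) -> Int? {
    guard let createdAt, let created = parse(createdAt) else { return nil }
    return Calendar.current.dateComponents([.day], from: created, to: Date()).day
}

// e.g. flag keys older than 90 days:
// if let age = accessKeyAgeInDays(createdAt: key.createdAt, parse: SecurityHubTimestamp.parse), age > 90 { ... }
```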
@@ -12731,9 +12653,7 @@ extension SecurityHub {
public let accessKeyId: String?
/// The Amazon Web Services account ID of the account for the key.
public let accountId: String?
- /// Indicates when the IAM access key was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the IAM access key was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createdAt: String?
/// The ID of the principal associated with an access key.
public let principalId: String?
@@ -12823,9 +12743,7 @@ extension SecurityHub {
}
public struct AwsIamAccessKeySessionContextAttributes: AWSEncodableShape & AWSDecodableShape {
- /// Indicates when the session was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the session was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let creationDate: String?
/// Indicates whether the session used multi-factor authentication (MFA).
public let mfaAuthenticated: Bool?
@@ -12910,9 +12828,7 @@ extension SecurityHub {
public struct AwsIamGroupDetails: AWSEncodableShape & AWSDecodableShape {
/// A list of the managed policies that are attached to the IAM group.
public let attachedManagedPolicies: [AwsIamAttachedManagedPolicy]?
- /// Indicates when the IAM group was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the IAM group was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createDate: String?
/// The identifier of the IAM group.
public let groupId: String?
@@ -12977,9 +12893,7 @@ extension SecurityHub {
public struct AwsIamInstanceProfile: AWSEncodableShape & AWSDecodableShape {
/// The ARN of the instance profile.
public let arn: String?
- /// Indicates when the instance profile was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the instance profile was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createDate: String?
/// The identifier of the instance profile.
public let instanceProfileId: String?
@@ -13026,9 +12940,7 @@ extension SecurityHub {
public let arn: String?
/// The policy that grants an entity permission to assume the role.
public let assumeRolePolicyDocument: String?
- /// Indicates when the role was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the role was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createDate: String?
/// The path to the role.
public let path: String?
@@ -13094,9 +13006,7 @@ extension SecurityHub {
public struct AwsIamPolicyDetails: AWSEncodableShape & AWSDecodableShape {
/// The number of users, groups, and roles that the policy is attached to.
public let attachmentCount: Int?
- /// When the policy was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// When the policy was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createDate: String?
/// The identifier of the default version of the policy.
public let defaultVersionId: String?
@@ -13114,9 +13024,7 @@ extension SecurityHub {
public let policyName: String?
/// List of versions of the policy.
public let policyVersionList: [AwsIamPolicyVersion]?
- /// When the policy was most recently updated. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// When the policy was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let updateDate: String?
@inlinable
@@ -13163,9 +13071,7 @@ extension SecurityHub {
}
public struct AwsIamPolicyVersion: AWSEncodableShape & AWSDecodableShape {
- /// Indicates when the version was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the version was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createDate: String?
/// Whether the version is the default version.
public let isDefaultVersion: Bool?
@@ -13196,9 +13102,7 @@ extension SecurityHub {
public let assumeRolePolicyDocument: String?
/// The list of the managed policies that are attached to the role.
public let attachedManagedPolicies: [AwsIamAttachedManagedPolicy]?
- /// Indicates when the role was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the role was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createDate: String?
/// The list of instance profiles that contain this role.
public let instanceProfileList: [AwsIamInstanceProfile]?
@@ -13283,9 +13187,7 @@ extension SecurityHub {
public struct AwsIamUserDetails: AWSEncodableShape & AWSDecodableShape {
/// A list of the managed policies that are attached to the user.
public let attachedManagedPolicies: [AwsIamAttachedManagedPolicy]?
- /// Indicates when the user was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the user was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createDate: String?
/// A list of IAM groups that the user belongs to.
public let groupList: [String]?
@@ -13423,9 +13325,7 @@ extension SecurityHub {
public struct AwsKmsKeyDetails: AWSEncodableShape & AWSDecodableShape {
/// The twelve-digit account ID of the Amazon Web Services account that owns the KMS key.
public let awsAccountId: String?
- /// Indicates when the KMS key was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the KMS key was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let creationDate: Double?
/// A description of the KMS key.
public let description: String?
@@ -13542,9 +13442,7 @@ extension SecurityHub {
public let handler: String?
/// The KMS key that is used to encrypt the function's environment variables. This key is only returned if you've configured a customer managed key.
public let kmsKeyArn: String?
- /// Indicates when the function was last updated. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the function was last updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let lastModified: String?
/// The function's layers.
public let layers: [AwsLambdaFunctionLayer]?
@@ -13766,9 +13664,7 @@ extension SecurityHub {
public struct AwsLambdaLayerVersionDetails: AWSEncodableShape & AWSDecodableShape {
/// The layer's compatible function runtimes. The following list includes deprecated runtimes. For more information, see Runtime deprecation policy in the Lambda Developer Guide. Array Members: Maximum number of 5 items. Valid Values: nodejs | nodejs4.3 | nodejs6.10 | nodejs8.10 | nodejs10.x | nodejs12.x | nodejs14.x | nodejs16.x | java8 | java8.al2 | java11 | python2.7 | python3.6 | python3.7 | python3.8 | python3.9 | dotnetcore1.0 | dotnetcore2.0 | dotnetcore2.1 | dotnetcore3.1 | dotnet6 | nodejs4.3-edge | go1.x | ruby2.5 | ruby2.7 | provided | provided.al2 | nodejs18.x | python3.10 | java17 | ruby3.2 | python3.11 | nodejs20.x | provided.al2023 | python3.12 | java21
public let compatibleRuntimes: [String]?
- /// Indicates when the version was created. This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the version was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let createdDate: String?
/// The version number.
public let version: Int64?
@@ -14669,9 +14565,7 @@ extension SecurityHub {
public let availabilityZones: [String]?
/// The number of days for which automated backups are retained.
public let backupRetentionPeriod: Int?
- /// Indicates when the DB cluster was created, in Universal Coordinated Time (UTC). This field accepts only the specified formats. Timestamps
- /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited
- /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
+ /// Indicates when the DB cluster was created, in Universal Coordinated Time (UTC). For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
public let clusterCreateTime: String?
/// Whether tags are copied from the DB cluster to snapshots of the DB cluster.
public let copyTagsToSnapshot: Bool?
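One detail worth noting while reading these hunks: AwsKmsKeyDetails.creationDate above is a `Double` (epoch seconds), unlike the `String` timestamp fields everywhere else in this file, so it converts to a `Date` without any string parsing. A one-line illustration (not part of the patch):

```swift
import Foundation

// AwsKmsKeyDetails.creationDate is epoch seconds, not an RFC 3339 string.
func kmsKeyCreationDate(_ epochSeconds: Double?) -> Date? {
    epochSeconds.map(Date.init(timeIntervalSince1970:))
}
```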
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the DB cluster was created, in Universal Coordinated Time (UTC). For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let clusterCreateTime: String? /// The DB cluster identifier. public let dbClusterIdentifier: String? @@ -14978,9 +14870,7 @@ extension SecurityHub { public let percentProgress: Int? /// The port number on which the DB instances in the DB cluster accept connections. public let port: Int? - /// Indicates when the snapshot was taken. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the snapshot was taken. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let snapshotCreateTime: String? /// The type of DB cluster snapshot. public let snapshotType: String? @@ -15172,17 +15062,13 @@ extension SecurityHub { public let enhancedMonitoringResourceArn: String? /// True if mapping of IAM accounts to database accounts is enabled, and otherwise false. IAM database authentication can be enabled for the following database engines. For MySQL 5.6, minor version 5.6.34 or higher For MySQL 5.7, minor version 5.7.16 or higher Aurora 5.6 or higher public let iamDatabaseAuthenticationEnabled: Bool? - /// Indicates when the DB instance was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the DB instance was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let instanceCreateTime: String? /// Specifies the provisioned IOPS (I/O operations per second) for this DB instance. public let iops: Int? /// If StorageEncrypted is true, the KMS key identifier for the encrypted DB instance. public let kmsKeyId: String? - /// Specifies the latest time to which a database can be restored with point-in-time restore. This field accepts only the specified formats. 
Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Specifies the latest time to which a database can be restored with point-in-time restore. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let latestRestorableTime: String? /// License model information for this DB instance. public let licenseModel: String? @@ -16024,9 +15910,7 @@ extension SecurityHub { public let sourceType: String? /// The status of the event notification subscription. Valid values: creating | modifying | deleting | active | no-permission | topic-not-exist public let status: String? - /// The datetime when the event notification subscription was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The datetime when the event notification subscription was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let subscriptionCreationTime: String? @inlinable @@ -16241,15 +16125,11 @@ extension SecurityHub { } public struct AwsRedshiftClusterDeferredMaintenanceWindow: AWSEncodableShape & AWSDecodableShape { - /// The end of the time window for which maintenance was deferred. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The end of the time window for which maintenance was deferred. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let deferMaintenanceEndTime: String? /// The identifier of the maintenance window. public let deferMaintenanceIdentifier: String? - /// The start of the time window for which maintenance was deferred. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. 
The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The start of the time window for which maintenance was deferred. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let deferMaintenanceStartTime: String? @inlinable @@ -16281,9 +16161,7 @@ extension SecurityHub { public let availabilityZone: String? /// The availability status of the cluster for queries. Possible values are the following: Available - The cluster is available for queries. Unavailable - The cluster is not available for queries. Maintenance - The cluster is intermittently available for queries due to maintenance activities. Modifying - The cluster is intermittently available for queries due to changes that modify the cluster. Failed - The cluster failed and is not available for queries. public let clusterAvailabilityStatus: String? - /// Indicates when the cluster was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the cluster was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let clusterCreateTime: String? /// The unique identifier of the cluster. public let clusterIdentifier: String? @@ -16319,9 +16197,7 @@ extension SecurityHub { public let endpoint: AwsRedshiftClusterEndpoint? /// Indicates whether to create the cluster with enhanced VPC routing enabled. public let enhancedVpcRouting: Bool? - /// Indicates when the next snapshot is expected to be taken. The cluster must have a valid snapshot schedule and have backups enabled. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the next snapshot is expected to be taken. The cluster must have a valid snapshot schedule and have backups enabled. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let expectedNextSnapshotScheduleTime: String? /// The status of the next expected snapshot. &#13;
Valid values: OnTrack | Pending public let expectedNextSnapshotScheduleTimeStatus: String? @@ -16339,9 +16215,7 @@ extension SecurityHub { public let manualSnapshotRetentionPeriod: Int? /// The master user name for the cluster. This name is used to connect to the database that is specified as the value of DBName. public let masterUsername: String? - /// Indicates the start of the next maintenance window. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates the start of the next maintenance window. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let nextMaintenanceWindowStartTime: String? /// The node type for the nodes in the cluster. public let nodeType: String? @@ -16619,13 +16493,9 @@ extension SecurityHub { public let bucketName: String? /// The message indicating that the logs failed to be delivered. public let lastFailureMessage: String? - /// The last time when logs failed to be delivered. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The last time when logs failed to be delivered. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastFailureTime: String? - /// The last time that logs were delivered successfully. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The last time that logs were delivered successfully. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastSuccessfulDeliveryTime: String? /// Indicates whether logging is enabled. public let loggingEnabled: Bool? &#13;
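The String-typed timestamp fields in these shapes (lastFailureTime and lastSuccessfulDeliveryTime above, and every field whose comment now points at the Timestamps reference) share the constraints spelled out in the removed text: ISO 8601/RFC 3339 strings, at most nine fractional-second digits, and an offset of Z or ("+" / "-") bounded by +/-18:00. A minimal Swift sketch of producing a compliant value with Foundation; the formatter configuration is illustrative and not part of the generated Soto code:

import Foundation

// ISO8601DateFormatter with fractional seconds emits strings such as
// 2019-01-31T23:00:00.123Z, which matches the accepted
// YYYY-MM-DDTHH:MM:SS.mmmZ family (millisecond precision is well under
// the nine-digit time-secfrac limit).
let formatter = ISO8601DateFormatter()
formatter.formatOptions = [.withInternetDateTime, .withFractionalSeconds]
formatter.timeZone = TimeZone(identifier: "UTC")

let lastSuccessfulDeliveryTime = formatter.string(from: Date())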
@@ -17065,9 +16935,7 @@ extension SecurityHub { public struct AwsS3BucketBucketLifecycleConfigurationRulesDetails: AWSEncodableShape & AWSDecodableShape { /// How Amazon S3 responds when a multipart upload is incomplete. Specifically, provides a number of days before Amazon S3 cancels the entire upload. public let abortIncompleteMultipartUpload: AwsS3BucketBucketLifecycleConfigurationRulesAbortIncompleteMultipartUploadDetails? - /// The date when objects are moved or deleted. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The date when objects are moved or deleted. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let expirationDate: String? /// The length in days of the lifetime for objects that are subject to the rule. public let expirationInDays: Int? @@ -17284,9 +17152,7 @@ extension SecurityHub { } public struct AwsS3BucketBucketLifecycleConfigurationRulesTransitionsDetails: AWSEncodableShape & AWSDecodableShape { - /// A date on which to transition objects to the specified storage class. If you provide Date, you cannot provide Days. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A date on which to transition objects to the specified storage class. If you provide Date, you cannot provide Days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let date: String? /// The number of days after which to transition the object to the specified storage class. If you provide Days, you cannot provide Date. public let days: Int? @@ -17347,9 +17213,7 @@ extension SecurityHub { public let bucketVersioningConfiguration: AwsS3BucketBucketVersioningConfiguration? /// The website configuration parameters for the S3 bucket. public let bucketWebsiteConfiguration: AwsS3BucketWebsiteConfiguration? - /// Indicates when the S3 bucket was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the S3 bucket was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdAt: String? /// The name of the bucket. public let name: String? @@ -17832,9 +17696,7 @@ extension SecurityHub { public let contentType: String? /// The opaque identifier assigned by a web server to a specific version of a resource found at a URL. public let eTag: String? - /// Indicates when the object was last modified. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the object was last modified. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastModified: String? /// If the object is stored using server-side encryption, the value of the server-side encryption algorithm used when storing this object in Amazon S3. public let serverSideEncryption: String? @@ -17875,11 +17737,11 @@ extension SecurityHub { public struct AwsSageMakerNotebookInstanceDetails: AWSEncodableShape & AWSDecodableShape { /// A list of Amazon Elastic Inference instance types to associate with the notebook instance. Currently, only one instance type can be associated with a notebook instance. public let acceleratorTypes: [String]? - /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git repositories with SageMaker notebook instances in the Amazon SageMaker Developer Guide. + /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git repositories with SageMaker AI notebook instances in the Amazon SageMaker AI Developer Guide. public let additionalCodeRepositories: [String]? - /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in CodeCommit or in any other Git repository. 
When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git repositories with SageMaker notebook instances in the Amazon SageMaker Developer Guide. + /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git repositories with SageMaker AI notebook instances in the Amazon SageMaker AI Developer Guide. public let defaultCodeRepository: String? - /// Sets whether SageMaker provides internet access to the notebook instance. If you set this to Disabled, this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a Network Address Translation (NAT) Gateway in your VPC. + /// Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to Disabled, this notebook instance is able to access resources only in your VPC, and is not able to connect to SageMaker AI training and endpoint services unless you configure a Network Address Translation (NAT) Gateway in your VPC. public let directInternetAccess: String? /// If status of the instance is Failed, the reason it failed. public let failureReason: String? @@ -17887,9 +17749,9 @@ extension SecurityHub { public let instanceMetadataServiceConfiguration: AwsSageMakerNotebookInstanceMetadataServiceConfigurationDetails? /// The type of machine learning (ML) compute instance to launch for the notebook instance. public let instanceType: String? - /// The Amazon Resource Name (ARN) of an Key Management Service (KMS) key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and disabling keys in the Key Management Service Developer Guide. + /// The Amazon Resource Name (ARN) of a Key Management Service (KMS) key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and disabling keys in the Key Management Service Developer Guide. public let kmsKeyId: String? - /// The network interface ID that SageMaker created when the instance was created. + /// The network interface ID that SageMaker AI created when the instance was created. public let networkInterfaceId: String? /// The Amazon Resource Name (ARN) of the notebook instance. public let notebookInstanceArn: String? @@ -18084,22 +17946,18 @@ extension SecurityHub { public let compliance: Compliance? /// A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence. public let confidence: Int? - /// Indicates when the security findings provider created the potential security issue that a finding captured. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. &#13;
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the security findings provider created the potential security issue that a finding captured. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdAt: String? /// The level of importance assigned to the resources associated with the finding. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. public let criticality: Int? /// A finding's description. Description is a required property. Length Constraints: Minimum length of 1. Maximum length of 1024. public let description: String? /// Provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you - /// must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide. + /// must have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide. public let detection: Detection? /// In a BatchImportFindings request, finding providers use FindingProviderFields to provide and update their own values for confidence, criticality, related findings, severity, and types. public let findingProviderFields: FindingProviderFields? - /// Indicates when the security findings provider first observed the potential security issue that a finding captured. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the security findings provider first observed the potential security issue that a finding captured. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let firstObservedAt: String? /// Provides metadata for the Amazon CodeGuru detector associated with a finding. This field pertains to /// findings that relate to Lambda functions. Amazon Inspector identifies policy violations and @@ -18110,9 +17968,7 @@ extension SecurityHub { public let generatorId: String? /// The security findings provider-specific identifier for a finding. Length Constraints: Minimum length of 1. Maximum length of 512. public let id: String? - /// Indicates when the security findings provider most recently observed the potential security issue that a finding captured. This field accepts only the specified formats. 
Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the security findings provider most recently observed a change in the resource that is involved in the finding. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastObservedAt: String? /// A list of malware related to a finding. Array Members: Maximum number of 5 items. public let malware: [Malware]? @@ -18126,9 +17982,7 @@ extension SecurityHub { public let patchSummary: PatchSummary? /// The details of process-related information about a finding. public let process: ProcessDetails? - /// A timestamp that indicates when Security Hub received a finding and begins to process it. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when Security Hub received a finding and begins to process it. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let processedAt: String? /// The ARN generated by Security Hub that uniquely identifies a product that generates findings. This can be the ARN for a third-party product that is integrated with Security Hub, or the ARN for a custom integration. Length Constraints: Minimum length of 12. Maximum length of 2048. public let productArn: String? @@ -18162,9 +18016,7 @@ extension SecurityHub { public let title: String? /// One or more finding types in the format of namespace/category/classifier that classify a finding. Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications Array Members: Maximum number of 50 items. public let types: [String]? - /// Indicates when the security findings provider last updated the finding record. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the security findings provider last updated the finding record. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let updatedAt: String? /// A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding. Can contain up to 50 key-value pairs. For each key-value pair, the key can contain up to 128 characters, and the value can contain up to 1024 characters. public let userDefinedFields: [String: String]? @@ -18357,9 +18209,7 @@ extension SecurityHub { public let complianceStatus: [StringFilter]? /// A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence. public let confidence: [NumberFilter]? - /// A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdAt: [DateFilter]? /// The level of importance assigned to the resources associated with the finding. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. public let criticality: [NumberFilter]? @@ -18379,9 +18229,7 @@ extension SecurityHub { public let findingProviderFieldsSeverityOriginal: [StringFilter]? /// One or more finding types that the finding provider assigned to the finding. Uses the format of namespace/category/classifier that classify a finding. Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications public let findingProviderFieldsTypes: [StringFilter]? - /// A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let firstObservedAt: [DateFilter]? /// The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc. public let generatorId: [StringFilter]? @@ -18389,9 +18237,7 @@ extension SecurityHub { public let id: [StringFilter]? /// A keyword for a finding. public let keyword: [KeywordFilter]? - /// A timestamp that indicates when the security findings provider most recently observed the potential security issue that a finding captured. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastObservedAt: [DateFilter]? /// The name of the malware that was observed. public let malwareName: [StringFilter]? @@ -18429,9 +18275,7 @@ extension SecurityHub { public let noteUpdatedAt: [DateFilter]? /// The principal that created a note. public let noteUpdatedBy: [StringFilter]? - /// A timestamp that identifies when the process was launched. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that identifies when the process was launched. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let processLaunchedAt: [DateFilter]? /// The name of the process. public let processName: [StringFilter]? @@ -18441,9 +18285,7 @@ extension SecurityHub { public let processPath: [StringFilter]? /// The process ID. 
public let processPid: [NumberFilter]? - /// A timestamp that identifies when the process was terminated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that identifies when the process was terminated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let processTerminatedAt: [DateFilter]? /// The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) after this provider's product (solution that generates findings) is registered with Security Hub. public let productArn: [StringFilter]? @@ -18501,9 +18343,7 @@ extension SecurityHub { public let resourceContainerImageId: [StringFilter]? /// The name of the image related to a finding. public let resourceContainerImageName: [StringFilter]? - /// A timestamp that identifies when the container was started. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that identifies when the container was started. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let resourceContainerLaunchedAt: [DateFilter]? /// The name of the container related to a finding. public let resourceContainerName: [StringFilter]? @@ -18531,7 +18371,7 @@ extension SecurityHub { public let sourceUrl: [StringFilter]? /// The category of a threat intelligence indicator. public let threatIntelIndicatorCategory: [StringFilter]? - /// A timestamp that identifies the last observation of a threat intelligence indicator. + /// A timestamp that identifies the last observation of a threat intelligence indicator. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let threatIntelIndicatorLastObservedAt: [DateFilter]? /// The source of the threat intelligence. public let threatIntelIndicatorSource: [StringFilter]? @@ -18545,9 +18385,7 @@ extension SecurityHub { public let title: [StringFilter]? /// A finding type in the format of namespace/category/classifier that classifies a finding. public let type: [StringFilter]? - /// A timestamp that indicates when the security findings provider last updated the finding record. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. 
The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the security findings provider last updated the finding record. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let updatedAt: [DateFilter]? /// A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding. public let userDefinedFields: [MapFilter]? @@ -21519,9 +21357,7 @@ extension SecurityHub { public let imageId: String? /// The name of the container image related to a finding. public let imageName: String? - /// Indicates when the container started. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the container started. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let launchedAt: String? /// The name of the container related to a finding. public let name: String? @@ -22036,13 +21872,9 @@ extension SecurityHub { public struct DateFilter: AWSEncodableShape & AWSDecodableShape { /// A date range for the date filter. public let dateRange: DateRange? - /// A timestamp that provides the end date for the date filter. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that provides the end date for the date filter. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let end: String? - /// A timestamp that provides the start date for the date filter. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that provides the start date for the date filter. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let start: String? @inlinable @@ -22379,7 +22211,7 @@ extension SecurityHub { } public struct DescribeHubResponse: AWSDecodableShape { - /// Whether to automatically enable new controls when they are added to standards that are enabled. If set to true, then new controls for enabled standards are enabled automatically. If set to false, then new controls are not enabled. + /// Whether to automatically enable new controls when they are added to standards that are enabled. If set to true, then new controls for enabled standards are enabled automatically. If set to false, then new controls are not enabled. When you automatically enable new controls, you can interact with the controls in the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of DISABLED. It can take up to several days for Security Hub to process the control release and designate the control as ENABLED in your account. During the processing period, you can manually enable or disable a control, and Security Hub will maintain that designation regardless of whether you have AutoEnableControls set to true. public let autoEnableControls: Bool? /// Specifies whether the calling account has consolidated control findings turned on. If the value for this field is set to SECURITY_CONTROL, Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards. If the value for this field is set to STANDARD_CONTROL, Security Hub generates separate findings for a control check when the check applies to multiple enabled standards. The value for this field in a member account matches the value in the administrator account. For accounts that aren't part of an organization, the default value of this field is SECURITY_CONTROL if you enabled Security Hub on or after February 23, 2023. public let controlFindingGenerator: ControlFindingGenerator? @@ -22933,9 +22765,7 @@ extension SecurityHub { public let updates: [FindingHistoryUpdate]? /// Identifies the source of the event that changed the finding. For example, an integrated Amazon Web Services service or third-party partner integration may call BatchImportFindings, or a Security Hub customer may call BatchUpdateFindings. public let updateSource: FindingHistoryUpdateSource? - /// A timestamp that indicates when Security Hub processed the updated finding record. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. &#13;
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when Security Hub processed the updated finding record. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. @OptionalCustomCoding public var updateTime: Date? @@ -23459,9 +23289,7 @@ extension SecurityHub { } public struct GetFindingHistoryRequest: AWSEncodableShape { - /// An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. @OptionalCustomCoding public var endTime: Date? public let findingIdentifier: AwsSecurityFindingIdentifier? @@ -23469,9 +23297,7 @@ extension SecurityHub { public let maxResults: Int? /// A token for pagination purposes. Provide NULL as the initial value. In subsequent requests, provide the token included in the response to get up to an additional 100 results of finding history. 
If you don’t provide NextToken, Security Hub returns up to 100 results of finding history for each request. public let nextToken: String? - /// A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. @OptionalCustomCoding public var startTime: Date? @@ -25055,9 +24881,7 @@ extension SecurityHub { public struct Note: AWSEncodableShape & AWSDecodableShape { /// The text of a note. Length Constraints: Minimum of 1. Maximum of 512. public let text: String? - /// A timestamp that indicates when the note was updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the note was updated. 
For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let updatedAt: String? /// The principal that created a note. public let updatedBy: String? @@ -25279,13 +25103,9 @@ extension SecurityHub { public let missingCount: Int? /// The type of patch operation performed. For Patch Manager, the values are SCAN and INSTALL. Length Constraints: Minimum length of 1. Maximum length of 256. public let operation: String? - /// Indicates when the operation completed. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the operation completed. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let operationEndTime: String? - /// Indicates when the operation started. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the operation started. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let operationStartTime: String? /// The reboot option specified for the instance. Length Constraints: Minimum length of 1. Maximum length of 256. public let rebootOption: String? @@ -25417,9 +25237,7 @@ extension SecurityHub { } public struct ProcessDetails: AWSEncodableShape & AWSDecodableShape { - /// Indicates when the process was launched. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the process was launched. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let launchedAt: String? /// The name of the process. Length Constraints: Minimum of 1. Maximum of 64. public let name: String? @@ -25429,9 +25247,7 @@ extension SecurityHub { public let path: String? /// The process ID. 
public let pid: Int? - /// Indicates when the process was terminated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the process was terminated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let terminatedAt: String? @inlinable @@ -27209,7 +27025,7 @@ extension SecurityHub { public let productArn: String? /// The ARN or ID of the Amazon Web Services resource associated with the signal. public let resourceIds: [String]? - /// The severity associated with the signal. For more information about severity, see Findings severity levels in the Amazon GuardDuty User Guide. + /// The severity associated with the signal. For more information about severity, see Severity levels for GuardDuty findings in the Amazon GuardDuty User Guide. public let severity: Double? /// Contains information about the indicators associated with the signals in this attack sequence finding. The values for SignalIndicators are a subset of the values for SequenceIndicators, but the values for these fields don't always match 1:1. public let signalIndicators: [Indicator]? @@ -28010,9 +27826,7 @@ extension SecurityHub { public struct ThreatIntelIndicator: AWSEncodableShape & AWSDecodableShape { /// The category of a threat intelligence indicator. public let category: ThreatIntelIndicatorCategory? - /// Indicates when the most recent instance of a threat intelligence indicator was observed. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the most recent instance of a threat intelligence indicator was observed. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastObservedAt: String? /// The source of the threat intelligence indicator. Length Constraints: Minimum of 1 length. Maximum of 64 length. public let source: String? @@ -28577,7 +28391,7 @@ extension SecurityHub { } public struct UpdateSecurityHubConfigurationRequest: AWSEncodableShape { - /// Whether to automatically enable new controls when they are added to standards that are enabled. By default, this is set to true, and new controls are enabled automatically. To not automatically enable new controls, set this to false. + /// Whether to automatically enable new controls when they are added to standards that are enabled. 
By default, this is set to true, and new controls are enabled automatically. To not automatically enable new controls, set this to false. When you automatically enable new controls, you can interact with the controls in the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of DISABLED. It can take up to several days for Security Hub to process the control release and designate the control as ENABLED in your account. During the processing period, you can manually enable or disable a control, and Security Hub will maintain that designation regardless of whether you have AutoEnableControls set to true. public let autoEnableControls: Bool? /// Updates whether the calling account has consolidated control findings turned on. If the value for this field is set to SECURITY_CONTROL, Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards. If the value for this field is set to STANDARD_CONTROL, Security Hub generates separate findings for a control check when the check applies to multiple enabled standards. For accounts that are part of an organization, this value can only be updated in the administrator account. public let controlFindingGenerator: ControlFindingGenerator? @@ -28856,15 +28670,11 @@ extension SecurityHub { public let name: String? /// The URL of the vulnerability advisory. public let url: String? - /// Indicates when the vulnerability advisory was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the vulnerability advisory was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let vendorCreatedAt: String? /// The severity that the vendor assigned to the vulnerability. public let vendorSeverity: String? - /// Indicates when the vulnerability advisory was last updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the vulnerability advisory was last updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let vendorUpdatedAt: String? 
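The AutoEnableControls note above has an operational consequence: with automatic enablement on, newly released controls briefly report DISABLED until Security Hub finishes processing them. A rough sketch of opting out instead, assuming .securityControl is the Soto case name for the SECURITY_CONTROL value documented above:

import SotoSecurityHub

// Sketch: stop auto-enabling new controls and consolidate control
// findings. Manual enable/disable choices made afterwards are kept by
// Security Hub regardless of this setting.
func optOutOfNewControls(_ securityHub: SecurityHub) async throws {
    let request = SecurityHub.UpdateSecurityHubConfigurationRequest(
        autoEnableControls: false,
        controlFindingGenerator: .securityControl
    )
    _ = try await securityHub.updateSecurityHubConfiguration(request)
}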
@inlinable diff --git a/Sources/Soto/Services/SecurityIR/SecurityIR_shapes.swift b/Sources/Soto/Services/SecurityIR/SecurityIR_shapes.swift index 47dc17895f..9cbb7af6ba 100644 --- a/Sources/Soto/Services/SecurityIR/SecurityIR_shapes.swift +++ b/Sources/Soto/Services/SecurityIR/SecurityIR_shapes.swift @@ -1310,6 +1310,7 @@ extension SecurityIR { public func validate(name: String) throws { try self.validate(self.ipAddress, name: "ipAddress", parent: name, pattern: "^(?:(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))|(?:(?:[A-F0-9]{1,4}:){7}[A-F0-9]{1,4})|(?:(?:[A-F0-9]{1,4}:){6}(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))$") try self.validate(self.userAgent, name: "userAgent", parent: name, max: 500) + try self.validate(self.userAgent, name: "userAgent", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift b/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift index c5dd56187f..096f6fdff1 100644 --- a/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift +++ b/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift @@ -137,7 +137,7 @@ public struct SecurityLake: AWSService { /// /// Parameters: /// - configuration: The configuration used for the third-party custom source. - /// - eventClasses: The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are: ACCESS_ACTIVITY FILE_ACTIVITY KERNEL_ACTIVITY KERNEL_EXTENSION MEMORY_ACTIVITY MODULE_ACTIVITY PROCESS_ACTIVITY REGISTRY_KEY_ACTIVITY REGISTRY_VALUE_ACTIVITY RESOURCE_ACTIVITY SCHEDULED_JOB_ACTIVITY SECURITY_FINDING ACCOUNT_CHANGE AUTHENTICATION AUTHORIZATION ENTITY_MANAGEMENT_AUDIT DHCP_ACTIVITY NETWORK_ACTIVITY DNS_ACTIVITY FTP_ACTIVITY HTTP_ACTIVITY RDP_ACTIVITY SMB_ACTIVITY SSH_ACTIVITY CONFIG_STATE INVENTORY_INFO EMAIL_ACTIVITY API_ACTIVITY CLOUD_API + /// - eventClasses: The Open Cybersecurity Schema Framework (OCSF) event classes which describe the type of data that the custom source will send to Security Lake. For the list of supported event classes, see the Amazon Security Lake User Guide. /// - sourceName: Specify the name for a third-party custom source. This must be a Regionally unique value. The sourceName you enter here, is used in the LogProviderRole name which follows the convention AmazonSecurityLake-Provider-{name of the custom source}-{region}. You must use a CustomLogSource name that is shorter than or equal to 20 characters. This ensures that the LogProviderRole name is below the 64 character limit. /// - sourceVersion: Specify the source version for the third-party custom source, to limit log collection to a specific version of custom data source. /// - logger: Logger use during operation @@ -228,7 +228,7 @@ public struct SecurityLake: AWSService { return try await self.createDataLakeExceptionSubscription(input, logger: logger) } - /// Automatically enables Amazon Security Lake for new member accounts in your organization. Security Lake is not automatically enabled for any existing member accounts in your organization. + /// Automatically enables Amazon Security Lake for new member accounts in your organization. Security Lake is not automatically enabled for any existing member accounts in your organization.
This operation merges the new data lake organization configuration with the existing configuration for Security Lake in your organization. If you want to create a new data lake organization configuration, you must delete the existing one using DeleteDataLakeOrganizationConfiguration. @Sendable @inlinable public func createDataLakeOrganizationConfiguration(_ input: CreateDataLakeOrganizationConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataLakeOrganizationConfigurationResponse { @@ -241,7 +241,7 @@ public struct SecurityLake: AWSService { logger: logger ) } - /// Automatically enables Amazon Security Lake for new member accounts in your organization. Security Lake is not automatically enabled for any existing member accounts in your organization. + /// Automatically enables Amazon Security Lake for new member accounts in your organization. Security Lake is not automatically enabled for any existing member accounts in your organization. This operation merges the new data lake organization configuration with the existing configuration for Security Lake in your organization. If you want to create a new data lake organization configuration, you must delete the existing one using DeleteDataLakeOrganizationConfiguration. /// /// Parameters: /// - autoEnableNewAccount: Enable Security Lake with the specified configuration settings, to begin collecting security data for new accounts in your organization. diff --git a/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift b/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift index 00d0f601bc..8e7e3ce1bd 100644 --- a/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift +++ b/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift @@ -280,7 +280,7 @@ extension SecurityLake { public struct CreateCustomLogSourceRequest: AWSEncodableShape { /// The configuration used for the third-party custom source. public let configuration: CustomLogSourceConfiguration - /// The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are: ACCESS_ACTIVITY FILE_ACTIVITY KERNEL_ACTIVITY KERNEL_EXTENSION MEMORY_ACTIVITY MODULE_ACTIVITY PROCESS_ACTIVITY REGISTRY_KEY_ACTIVITY REGISTRY_VALUE_ACTIVITY RESOURCE_ACTIVITY SCHEDULED_JOB_ACTIVITY SECURITY_FINDING ACCOUNT_CHANGE AUTHENTICATION AUTHORIZATION ENTITY_MANAGEMENT_AUDIT DHCP_ACTIVITY NETWORK_ACTIVITY DNS_ACTIVITY FTP_ACTIVITY HTTP_ACTIVITY RDP_ACTIVITY SMB_ACTIVITY SSH_ACTIVITY CONFIG_STATE INVENTORY_INFO EMAIL_ACTIVITY API_ACTIVITY CLOUD_API + /// The Open Cybersecurity Schema Framework (OCSF) event classes which describe the type of data that the custom source will send to Security Lake. For the list of supported event classes, see the Amazon Security Lake User Guide. public let eventClasses: [String]? /// Specify the name for a third-party custom source. This must be a Regionally unique value. The sourceName you enter here, is used in the LogProviderRole name which follows the convention AmazonSecurityLake-Provider-{name of the custom source}-{region}. You must use a CustomLogSource name that is shorter than or equal to 20 characters. This ensures that the LogProviderRole name is below the 64 character limit. public let sourceName: String @@ -890,7 +890,7 @@ extension SecurityLake { public struct DataLakeSource: AWSDecodableShape { /// The ID of the Security Lake account for which logs are collected. public let account: String?
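For the createCustomLogSource call documented above, a rough sketch. The eventClasses and sourceName constraints come from this diff; the nested CustomLogSourceConfiguration members (crawlerConfiguration, providerIdentity) are assumptions based on the Security Lake API shapes:

import SotoSecurityLake

// Sketch: register a third-party custom source that emits FILE_ACTIVITY
// events. ARNs, IDs, and the role name are placeholders.
func registerCustomSource(_ securityLake: SecurityLake) async throws {
    let response = try await securityLake.createCustomLogSource(
        configuration: .init(
            crawlerConfiguration: .init(roleArn: "arn:aws:iam::111122223333:role/SecurityLakeCustomSource"),
            providerIdentity: .init(externalId: "custom-source-external-id", principal: "111122223333")
        ),
        eventClasses: ["FILE_ACTIVITY"],
        sourceName: "myCustomSource", // must be 20 characters or fewer
        sourceVersion: "1.0"
    )
    print(response)
}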
- /// The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are: ACCESS_ACTIVITY FILE_ACTIVITY KERNEL_ACTIVITY KERNEL_EXTENSION MEMORY_ACTIVITY MODULE_ACTIVITY PROCESS_ACTIVITY REGISTRY_KEY_ACTIVITY REGISTRY_VALUE_ACTIVITY RESOURCE_ACTIVITY SCHEDULED_JOB_ACTIVITY SECURITY_FINDING ACCOUNT_CHANGE AUTHENTICATION AUTHORIZATION ENTITY_MANAGEMENT_AUDIT DHCP_ACTIVITY NETWORK_ACTIVITY DNS_ACTIVITY FTP_ACTIVITY HTTP_ACTIVITY RDP_ACTIVITY SMB_ACTIVITY SSH_ACTIVITY CONFIG_STATE INVENTORY_INFO EMAIL_ACTIVITY API_ACTIVITY CLOUD_API + /// The Open Cybersecurity Schema Framework (OCSF) event classes describe the type of data that the custom source will send to Security Lake. For the list of supported event classes, see Supported OCSF Event classes in the Amazon Security Lake User Guide. public let eventClasses: [String]? /// The supported Amazon Web Services services from which logs and events are collected. Amazon Security Lake supports log and event collection for natively supported Amazon Web Services services. public let sourceName: String? diff --git a/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_api.swift b/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_api.swift index b7c8c55d26..dcf5522b4a 100644 --- a/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_api.swift +++ b/Sources/Soto/Services/ServiceDiscovery/ServiceDiscovery_api.swift @@ -93,6 +93,7 @@ public struct ServiceDiscovery: AWSService { "ap-southeast-3": "servicediscovery.ap-southeast-3.api.aws", "ap-southeast-4": "servicediscovery.ap-southeast-4.api.aws", "ap-southeast-5": "servicediscovery.ap-southeast-5.api.aws", + "ap-southeast-7": "servicediscovery.ap-southeast-7.api.aws", "ca-central-1": "servicediscovery.ca-central-1.api.aws", "ca-west-1": "servicediscovery.ca-west-1.api.aws", "cn-north-1": "servicediscovery.cn-north-1.api.amazonwebservices.com.cn", @@ -108,6 +109,7 @@ public struct ServiceDiscovery: AWSService { "il-central-1": "servicediscovery.il-central-1.api.aws", "me-central-1": "servicediscovery.me-central-1.api.aws", "me-south-1": "servicediscovery.me-south-1.api.aws", + "mx-central-1": "servicediscovery.mx-central-1.api.aws", "sa-east-1": "servicediscovery.sa-east-1.api.aws", "us-east-1": "servicediscovery.us-east-1.api.aws", "us-east-2": "servicediscovery.us-east-2.api.aws", diff --git a/Sources/Soto/Services/Snowball/Snowball_api.swift b/Sources/Soto/Services/Snowball/Snowball_api.swift index d03bc4b221..5a3b32582f 100644 --- a/Sources/Soto/Services/Snowball/Snowball_api.swift +++ b/Sources/Soto/Services/Snowball/Snowball_api.swift @@ -80,20 +80,77 @@ public struct Snowball: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "snowball.af-south-1.api.aws", + "ap-east-1": "snowball.ap-east-1.api.aws", + "ap-northeast-1": "snowball.ap-northeast-1.api.aws", + "ap-northeast-2": "snowball.ap-northeast-2.api.aws", + "ap-northeast-3": "snowball.ap-northeast-3.api.aws", + "ap-south-1": "snowball.ap-south-1.api.aws", + "ap-southeast-1": "snowball.ap-southeast-1.api.aws", + "ap-southeast-2": "snowball.ap-southeast-2.api.aws", + "ap-southeast-3": "snowball.ap-southeast-3.api.aws", + "ca-central-1": "snowball.ca-central-1.api.aws", + "eu-central-1": "snowball.eu-central-1.api.aws", + "eu-north-1": "snowball.eu-north-1.api.aws", + "eu-south-1":
"snowball.eu-south-1.api.aws", + "eu-west-1": "snowball.eu-west-1.api.aws", + "eu-west-2": "snowball.eu-west-2.api.aws", + "eu-west-3": "snowball.eu-west-3.api.aws", + "il-central-1": "snowball.il-central-1.api.aws", + "me-central-1": "snowball.me-central-1.api.aws", + "sa-east-1": "snowball.sa-east-1.api.aws", + "us-east-1": "snowball.us-east-1.api.aws", + "us-east-2": "snowball.us-east-2.api.aws", + "us-west-1": "snowball.us-west-1.api.aws", + "us-west-2": "snowball.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "af-south-1": "snowball-fips.af-south-1.api.aws", + "ap-east-1": "snowball-fips.ap-east-1.api.aws", + "ap-northeast-1": "snowball-fips.ap-northeast-1.api.aws", + "ap-northeast-2": "snowball-fips.ap-northeast-2.api.aws", + "ap-northeast-3": "snowball-fips.ap-northeast-3.api.aws", + "ap-south-1": "snowball-fips.ap-south-1.api.aws", + "ap-southeast-1": "snowball-fips.ap-southeast-1.api.aws", + "ap-southeast-2": "snowball-fips.ap-southeast-2.api.aws", + "ap-southeast-3": "snowball-fips.ap-southeast-3.api.aws", + "ca-central-1": "snowball-fips.ca-central-1.api.aws", + "eu-central-1": "snowball-fips.eu-central-1.api.aws", + "eu-north-1": "snowball-fips.eu-north-1.api.aws", + "eu-south-1": "snowball-fips.eu-south-1.api.aws", + "eu-west-1": "snowball-fips.eu-west-1.api.aws", + "eu-west-2": "snowball-fips.eu-west-2.api.aws", + "eu-west-3": "snowball-fips.eu-west-3.api.aws", + "il-central-1": "snowball-fips.il-central-1.api.aws", + "me-central-1": "snowball-fips.me-central-1.api.aws", + "sa-east-1": "snowball-fips.sa-east-1.api.aws", + "us-east-1": "snowball-fips.us-east-1.api.aws", + "us-east-2": "snowball-fips.us-east-2.api.aws", + "us-west-1": "snowball-fips.us-west-1.api.aws", + "us-west-2": "snowball-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ + "af-south-1": "snowball-fips.af-south-1.amazonaws.com", + "ap-east-1": "snowball-fips.ap-east-1.amazonaws.com", "ap-northeast-1": "snowball-fips.ap-northeast-1.amazonaws.com", "ap-northeast-2": "snowball-fips.ap-northeast-2.amazonaws.com", "ap-northeast-3": "snowball-fips.ap-northeast-3.amazonaws.com", "ap-south-1": "snowball-fips.ap-south-1.amazonaws.com", "ap-southeast-1": "snowball-fips.ap-southeast-1.amazonaws.com", "ap-southeast-2": "snowball-fips.ap-southeast-2.amazonaws.com", + "ap-southeast-3": "snowball-fips.ap-southeast-3.amazonaws.com", "ca-central-1": "snowball-fips.ca-central-1.amazonaws.com", "cn-north-1": "snowball-fips.cn-north-1.amazonaws.com.cn", "cn-northwest-1": "snowball-fips.cn-northwest-1.amazonaws.com.cn", "eu-central-1": "snowball-fips.eu-central-1.amazonaws.com", + "eu-north-1": "snowball-fips.eu-north-1.amazonaws.com", + "eu-south-1": "snowball-fips.eu-south-1.amazonaws.com", "eu-west-1": "snowball-fips.eu-west-1.amazonaws.com", "eu-west-2": "snowball-fips.eu-west-2.amazonaws.com", "eu-west-3": "snowball-fips.eu-west-3.amazonaws.com", + "il-central-1": "snowball-fips.il-central-1.amazonaws.com", + "me-central-1": "snowball-fips.me-central-1.amazonaws.com", "sa-east-1": "snowball-fips.sa-east-1.amazonaws.com", "us-east-1": "snowball-fips.us-east-1.amazonaws.com", "us-east-2": "snowball-fips.us-east-2.amazonaws.com", diff --git a/Sources/Soto/Services/SsmSap/SsmSap_api.swift b/Sources/Soto/Services/SsmSap/SsmSap_api.swift index 5f96af885d..25e6ebce1d 100644 --- a/Sources/Soto/Services/SsmSap/SsmSap_api.swift +++ b/Sources/Soto/Services/SsmSap/SsmSap_api.swift @@ -79,6 +79,43 @@ public struct SsmSap: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: 
[EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "ssm-sap.af-south-1.api.aws", + "ap-east-1": "ssm-sap.ap-east-1.api.aws", + "ap-northeast-1": "ssm-sap.ap-northeast-1.api.aws", + "ap-northeast-2": "ssm-sap.ap-northeast-2.api.aws", + "ap-northeast-3": "ssm-sap.ap-northeast-3.api.aws", + "ap-south-1": "ssm-sap.ap-south-1.api.aws", + "ap-south-2": "ssm-sap.ap-south-2.api.aws", + "ap-southeast-1": "ssm-sap.ap-southeast-1.api.aws", + "ap-southeast-2": "ssm-sap.ap-southeast-2.api.aws", + "ap-southeast-3": "ssm-sap.ap-southeast-3.api.aws", + "ap-southeast-4": "ssm-sap.ap-southeast-4.api.aws", + "ca-central-1": "ssm-sap.ca-central-1.api.aws", + "eu-central-1": "ssm-sap.eu-central-1.api.aws", + "eu-central-2": "ssm-sap.eu-central-2.api.aws", + "eu-north-1": "ssm-sap.eu-north-1.api.aws", + "eu-south-1": "ssm-sap.eu-south-1.api.aws", + "eu-south-2": "ssm-sap.eu-south-2.api.aws", + "eu-west-1": "ssm-sap.eu-west-1.api.aws", + "eu-west-2": "ssm-sap.eu-west-2.api.aws", + "eu-west-3": "ssm-sap.eu-west-3.api.aws", + "il-central-1": "ssm-sap.il-central-1.api.aws", + "me-central-1": "ssm-sap.me-central-1.api.aws", + "me-south-1": "ssm-sap.me-south-1.api.aws", + "sa-east-1": "ssm-sap.sa-east-1.api.aws", + "us-east-1": "ssm-sap.us-east-1.api.aws", + "us-east-2": "ssm-sap.us-east-2.api.aws", + "us-west-1": "ssm-sap.us-west-1.api.aws", + "us-west-2": "ssm-sap.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "ssm-sap-fips.ca-central-1.api.aws", + "us-east-1": "ssm-sap-fips.us-east-1.api.aws", + "us-east-2": "ssm-sap-fips.us-east-2.api.aws", + "us-west-1": "ssm-sap-fips.us-west-1.api.aws", + "us-west-2": "ssm-sap-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ "ca-central-1": "ssm-sap-fips.ca-central-1.amazonaws.com", "us-east-1": "ssm-sap-fips.us-east-1.amazonaws.com", @@ -586,6 +623,7 @@ public struct SsmSap: AWSService { /// Parameters: /// - applicationId: The ID of the application. /// - applicationType: The type of the application. + /// - componentsInfo: This is an optional parameter for component details to which the SAP ABAP application is attached, such as Web Dispatcher. This is an array of ApplicationComponent objects. You may input 0 to 5 items. /// - credentials: The credentials of the SAP application. /// - databaseArn: The Amazon Resource Name of the SAP HANA database. /// - instances: The Amazon EC2 instances on which your SAP application is running. @@ -597,6 +635,7 @@ public struct SsmSap: AWSService { public func registerApplication( applicationId: String, applicationType: ApplicationType, + componentsInfo: [ComponentInfo]? = nil, credentials: [ApplicationCredential]? = nil, databaseArn: String? = nil, instances: [String], @@ -608,6 +647,7 @@ public struct SsmSap: AWSService { let input = RegisterApplicationInput( applicationId: applicationId, applicationType: applicationType, + componentsInfo: componentsInfo, credentials: credentials, databaseArn: databaseArn, instances: instances, diff --git a/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift b/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift index 4e20d98a8b..6f6fb0a39e 100644 --- a/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift +++ b/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift @@ -451,6 +451,33 @@ extension SsmSap { } } + public struct ComponentInfo: AWSEncodableShape { + /// This string is the type of the component. Accepted value is WD. 
+ public let componentType: ComponentType + /// This is the Amazon EC2 instance on which your SAP component is running. Accepted values are alphanumeric. + public let ec2InstanceId: String + /// This string is the SAP System ID of the component. Accepted values are alphanumeric. + public let sid: String + + @inlinable + public init(componentType: ComponentType, ec2InstanceId: String, sid: String) { + self.componentType = componentType + self.ec2InstanceId = ec2InstanceId + self.sid = sid + } + + public func validate(name: String) throws { + try self.validate(self.ec2InstanceId, name: "ec2InstanceId", parent: name, pattern: "^i-[\\w\\d]{8}$|^i-[\\w\\d]{17}$") + try self.validate(self.sid, name: "sid", parent: name, pattern: "^[A-Z][A-Z0-9]{2}$") + } + + private enum CodingKeys: String, CodingKey { + case componentType = "ComponentType" + case ec2InstanceId = "Ec2InstanceId" + case sid = "Sid" + } + } + public struct ComponentSummary: AWSDecodableShape { /// The ID of the application. public let applicationId: String? @@ -1392,6 +1419,8 @@ extension SsmSap { public let applicationId: String /// The type of the application. public let applicationType: ApplicationType + /// This is an optional parameter for component details to which the SAP ABAP application is attached, such as Web Dispatcher. This is an array of ApplicationComponent objects. You may input 0 to 5 items. + public let componentsInfo: [ComponentInfo]? /// The credentials of the SAP application. public let credentials: [ApplicationCredential]? /// The Amazon Resource Name of the SAP HANA database. @@ -1406,9 +1435,10 @@ extension SsmSap { public let tags: [String: String]? @inlinable - public init(applicationId: String, applicationType: ApplicationType, credentials: [ApplicationCredential]? = nil, databaseArn: String? = nil, instances: [String], sapInstanceNumber: String? = nil, sid: String? = nil, tags: [String: String]? = nil) { + public init(applicationId: String, applicationType: ApplicationType, componentsInfo: [ComponentInfo]? = nil, credentials: [ApplicationCredential]? = nil, databaseArn: String? = nil, instances: [String], sapInstanceNumber: String? = nil, sid: String? = nil, tags: [String: String]? 
= nil) { self.applicationId = applicationId self.applicationType = applicationType + self.componentsInfo = componentsInfo self.credentials = credentials self.databaseArn = databaseArn self.instances = instances @@ -1421,6 +1451,10 @@ extension SsmSap { try self.validate(self.applicationId, name: "applicationId", parent: name, max: 60) try self.validate(self.applicationId, name: "applicationId", parent: name, min: 1) try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^[\\w\\d\\.-]+$") + try self.componentsInfo?.forEach { + try $0.validate(name: "\(name).componentsInfo[]") + } + try self.validate(self.componentsInfo, name: "componentsInfo", parent: name, max: 5) try self.credentials?.forEach { try $0.validate(name: "\(name).credentials[]") } @@ -1443,6 +1477,7 @@ extension SsmSap { private enum CodingKeys: String, CodingKey { case applicationId = "ApplicationId" case applicationType = "ApplicationType" + case componentsInfo = "ComponentsInfo" case credentials = "Credentials" case databaseArn = "DatabaseArn" case instances = "Instances" diff --git a/Sources/Soto/Services/SupplyChain/SupplyChain_api.swift b/Sources/Soto/Services/SupplyChain/SupplyChain_api.swift index 3735c51ae1..acf0669db4 100644 --- a/Sources/Soto/Services/SupplyChain/SupplyChain_api.swift +++ b/Sources/Soto/Services/SupplyChain/SupplyChain_api.swift @@ -223,6 +223,7 @@ public struct SupplyChain: AWSService { /// - instanceName: The AWS Supply Chain instance name. /// - kmsKeyArn: The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon Web Services owned KMS key. If you don't provide anything here, AWS Supply Chain uses the Amazon Web Services owned KMS key. /// - tags: The Amazon Web Services tags of an instance to be created. + /// - webAppDnsDomain: The DNS subdomain of the web app. This would be "example" in the URL "example.scn.global.on.aws". You can set this to a custom value, as long as the domain isn't already being used by someone else. The name may only include alphanumeric characters and hyphens. /// - logger: Logger use during operation @inlinable public func createInstance( @@ -231,6 +232,7 @@ public struct SupplyChain: AWSService { instanceName: String? = nil, kmsKeyArn: String? = nil, tags: [String: String]? = nil, + webAppDnsDomain: String? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateInstanceResponse { let input = CreateInstanceRequest( @@ -238,7 +240,8 @@ public struct SupplyChain: AWSService { instanceDescription: instanceDescription, instanceName: instanceName, kmsKeyArn: kmsKeyArn, - tags: tags + tags: tags, + webAppDnsDomain: webAppDnsDomain ) return try await self.createInstance(input, logger: logger) } @@ -292,8 +295,8 @@ public struct SupplyChain: AWSService { /// /// Parameters: /// - instanceId: The AWS Supply Chain instance identifier. - /// - name: The name of the dataset. If the namespace is asc, the name must be one of the supported data entities . - /// - namespace: The namespace of the dataset. The available values are: asc: for AWS Supply Chain supported datasets . default: for datasets with custom user-defined schemas. + /// - name: The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. + /// - namespace: The name space of the dataset. 
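A sketch of registering an application with the new componentsInfo parameter from the SsmSap hunks above. Here .wd and .sapAbap are assumed case names for the WD component type and SAP_ABAP application type, and the instance ID and SID values follow the validate() patterns shown earlier:

import SotoSsmSap

// Sketch: register an SAP ABAP application together with its Web
// Dispatcher component. The SID must match ^[A-Z][A-Z0-9]{2}$ and
// componentsInfo accepts at most 5 entries.
func registerAbapApp(_ ssmSap: SsmSap) async throws {
    let webDispatcher = SsmSap.ComponentInfo(
        componentType: .wd,
        ec2InstanceId: "i-0123456789abcdef0",
        sid: "WD1"
    )
    _ = try await ssmSap.registerApplication(
        applicationId: "my-abap-app",
        applicationType: .sapAbap,
        componentsInfo: [webDispatcher],
        instances: ["i-0abcdef0123456789"]
    )
}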
The available values are: asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. /// - logger: Logger use during operation @inlinable public func deleteDataLakeDataset( @@ -310,7 +313,7 @@ public struct SupplyChain: AWSService { return try await self.deleteDataLakeDataset(input, logger: logger) } - /// Enables you to programmatically delete an Amazon Web Services Supply Chain instance by deleting the KMS keys and relevant information associated with the API without using the Amazon Web Services console. This is an asynchronous operation. Upon receiving a DeleteInstance request, Amazon Web Services Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status. + /// Enables you to programmatically delete an Amazon Web Services Supply Chain instance by deleting the KMS keys and relevant information associated with the API without using the Amazon Web Services console. This is an asynchronous operation. Upon receiving a DeleteInstance request, Amazon Web Services Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status. @Sendable @inlinable public func deleteInstance(_ input: DeleteInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteInstanceResponse { @@ -323,7 +326,7 @@ public struct SupplyChain: AWSService { logger: logger ) } - /// Enables you to programmatically delete an Amazon Web Services Supply Chain instance by deleting the KMS keys and relevant information associated with the API without using the Amazon Web Services console. This is an asynchronous operation. Upon receiving a DeleteInstance request, Amazon Web Services Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status. + /// Enables you to programmatically delete an Amazon Web Services Supply Chain instance by deleting the KMS keys and relevant information associated with the API without using the Amazon Web Services console. This is an asynchronous operation. Upon receiving a DeleteInstance request, Amazon Web Services Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status. /// /// Parameters: /// - instanceId: The AWS Supply Chain instance identifier. @@ -520,7 +523,7 @@ public struct SupplyChain: AWSService { /// Parameters: /// - instanceId: The Amazon Web Services Supply Chain instance identifier. /// - maxResults: The max number of datasets to fetch in this paginated request. - /// - namespace: The namespace of the dataset. The available values are: asc: for AWS Supply Chain supported datasets . default: for datasets with custom user-defined schemas. + /// - namespace: The name space of the dataset. 
The available values are: asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. /// - nextToken: The pagination token to fetch next page of datasets. /// - logger: Logger use during operation @inlinable @@ -607,7 +610,7 @@ public struct SupplyChain: AWSService { return try await self.listTagsForResource(input, logger: logger) } - /// Send the transactional data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in data lake. New data events are synced with data lake at 5 PM GMT everyday. The updated transactional data is available in data lake after ingestion. + /// Send the transactional data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in data lake. New data events are synced with data lake at 5 PM GMT everyday. The updated transactional data is available in data lake after ingestion. @Sendable @inlinable public func sendDataIntegrationEvent(_ input: SendDataIntegrationEventRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SendDataIntegrationEventResponse { @@ -620,11 +623,11 @@ public struct SupplyChain: AWSService { logger: logger ) } - /// Send the transactional data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in data lake. New data events are synced with data lake at 5 PM GMT everyday. The updated transactional data is available in data lake after ingestion. + /// Send the transactional data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in data lake. New data events are synced with data lake at 5 PM GMT everyday. The updated transactional data is available in data lake after ingestion. /// /// Parameters: /// - clientToken: The idempotent client token. - /// - data: The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain . + /// - data: The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain. /// - eventGroupId: Event identifier (for example, orderId for InboundOrder) used for data sharing or partitioning. /// - eventTimestamp: The event timestamp (in epoch seconds). /// - eventType: The data event type. @@ -651,7 +654,7 @@ public struct SupplyChain: AWSService { return try await self.sendDataIntegrationEvent(input, logger: logger) } - /// You can create tags during or after creating a resource such as instance, data flow, or dataset in AWS Supply chain. During the data ingestion process, you can add tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets. You can use these tags to identify a group of resources or a single resource used by the developer. + /// You can create tags during or after creating a resource such as instance, data flow, or dataset in AWS Supply chain. 
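For the sendDataIntegrationEvent flow described above (events are staged in an Amazon Web Services service, then synced to the data lake at 5 PM GMT daily), a rough sketch. The instanceId parameter and the .scnDataInboundOrder case name for the scn.data.inboundorder event type are assumptions, and the payload is illustrative only:

import Foundation
import SotoSupplyChain

// Sketch: push one real-time data event for later data lake ingestion.
func sendInboundOrderEvent(_ supplyChain: SupplyChain, instanceId: String) async throws {
    let payload = #"{"id": "order-1", "tpartner_id": "partner-1"}"#
    _ = try await supplyChain.sendDataIntegrationEvent(
        data: payload,
        eventGroupId: "order-1",  // used for data sharing or partitioning
        eventTimestamp: Date(),   // encoded as epoch seconds on the wire
        eventType: .scnDataInboundOrder,
        instanceId: instanceId
    )
}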
During the data ingestion process, you can add tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets. You can use these tags to identify a group of resources or a single resource used by the developer. @Sendable @inlinable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { @@ -664,7 +667,7 @@ public struct SupplyChain: AWSService { logger: logger ) } - /// You can create tags during or after creating a resource such as instance, data flow, or dataset in AWS Supply chain. During the data ingestion process, you can add tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets. You can use these tags to identify a group of resources or a single resource used by the developer. + /// You can create tags during or after creating a resource such as instance, data flow, or dataset in AWS Supply chain. During the data ingestion process, you can add tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets. You can use these tags to identify a group of resources or a single resource used by the developer. /// /// Parameters: /// - resourceArn: The Amazon Web Services Supply chain resource ARN that needs to be tagged. @@ -683,7 +686,7 @@ public struct SupplyChain: AWSService { return try await self.tagResource(input, logger: logger) } - /// You can delete tags for an Amazon Web Services Supply chain resource such as instance, data flow, or dataset in AWS Supply Chain. During the data ingestion process, you can delete tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets. + /// You can delete tags for an Amazon Web Services Supply chain resource such as instance, data flow, or dataset in AWS Supply Chain. During the data ingestion process, you can delete tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets. @Sendable @inlinable public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { @@ -696,7 +699,7 @@ public struct SupplyChain: AWSService { logger: logger ) } - /// You can delete tags for an Amazon Web Services Supply chain resource such as instance, data flow, or dataset in AWS Supply Chain. During the data ingestion process, you can delete tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets. + /// You can delete tags for an Amazon Web Services Supply chain resource such as instance, data flow, or dataset in AWS Supply Chain. During the data ingestion process, you can delete tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets. /// /// Parameters: /// - resourceArn: The Amazon Web Services Supply chain resource ARN that needs to be untagged. @@ -903,7 +906,7 @@ extension SupplyChain { /// - Parameters: /// - instanceId: The Amazon Web Services Supply Chain instance identifier. /// - maxResults: The max number of datasets to fetch in this paginated request. - /// - namespace: The namespace of the dataset. The available values are: asc: for AWS Supply Chain supported datasets . default: for datasets with custom user-defined schemas. 
+ /// - namespace: The name space of the dataset. The available values are: asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. /// - logger: Logger used for logging @inlinable public func listDataLakeDatasetsPaginator( diff --git a/Sources/Soto/Services/SupplyChain/SupplyChain_shapes.swift b/Sources/Soto/Services/SupplyChain/SupplyChain_shapes.swift index 6a5b6c237b..cb53e5295d 100644 --- a/Sources/Soto/Services/SupplyChain/SupplyChain_shapes.swift +++ b/Sources/Soto/Services/SupplyChain/SupplyChain_shapes.swift @@ -359,14 +359,17 @@ extension SupplyChain { public let kmsKeyArn: String? /// The Amazon Web Services tags of an instance to be created. public let tags: [String: String]? + /// The DNS subdomain of the web app. This would be "example" in the URL "example.scn.global.on.aws". You can set this to a custom value, as long as the domain isn't already being used by someone else. The name may only include alphanumeric characters and hyphens. + public let webAppDnsDomain: String? @inlinable - public init(clientToken: String? = CreateInstanceRequest.idempotencyToken(), instanceDescription: String? = nil, instanceName: String? = nil, kmsKeyArn: String? = nil, tags: [String: String]? = nil) { + public init(clientToken: String? = CreateInstanceRequest.idempotencyToken(), instanceDescription: String? = nil, instanceName: String? = nil, kmsKeyArn: String? = nil, tags: [String: String]? = nil, webAppDnsDomain: String? = nil) { self.clientToken = clientToken self.instanceDescription = instanceDescription self.instanceName = instanceName self.kmsKeyArn = kmsKeyArn self.tags = tags + self.webAppDnsDomain = webAppDnsDomain } public func validate(name: String) throws { @@ -384,6 +387,7 @@ extension SupplyChain { try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) } try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.webAppDnsDomain, name: "webAppDnsDomain", parent: name, pattern: "^(?![-])[a-zA-Z0-9-]{1,62}[a-zA-Z0-9]$") } private enum CodingKeys: String, CodingKey { @@ -392,6 +396,7 @@ extension SupplyChain { case instanceName = "instanceName" case kmsKeyArn = "kmsKeyArn" case tags = "tags" + case webAppDnsDomain = "webAppDnsDomain" } } @@ -840,9 +845,9 @@ extension SupplyChain { public struct DeleteDataLakeDatasetRequest: AWSEncodableShape { /// The AWS Supply Chain instance identifier. public let instanceId: String - /// The name of the dataset. If the namespace is asc, the name must be one of the supported data entities . + /// The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. public let name: String - /// The namespace of the dataset. The available values are: asc: for AWS Supply Chain supported datasets . default: for datasets with custom user-defined schemas. + /// The name space of the dataset. The available values are: asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. public let namespace: String @inlinable @@ -880,7 +885,7 @@ extension SupplyChain { public let instanceId: String /// The name of deleted dataset. 
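The webAppDnsDomain addition above is easiest to see end to end; a short sketch using the createInstance convenience method from this diff, with all other parameters left at their defaults:

import SotoSupplyChain

// Sketch: create an instance whose web app lives at
// "example-corp.scn.global.on.aws". The subdomain must match
// ^(?![-])[a-zA-Z0-9-]{1,62}[a-zA-Z0-9]$ per the validation above.
func createBrandedInstance(_ supplyChain: SupplyChain) async throws {
    let response = try await supplyChain.createInstance(
        instanceDescription: "Production supply chain instance",
        instanceName: "prod-scn",
        webAppDnsDomain: "example-corp"
    )
    print(response)
}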
public let name: String - /// The namespace of deleted dataset. + /// The name space of deleted dataset. public let namespace: String @inlinable @@ -1228,7 +1233,7 @@ extension SupplyChain { public let instanceId: String /// The max number of datasets to fetch in this paginated request. public let maxResults: Int? - /// The namespace of the dataset. The available values are: asc: for AWS Supply Chain supported datasets . default: for datasets with custom user-defined schemas. + /// The name space of the dataset. The available values are: asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. public let namespace: String /// The pagination token to fetch next page of datasets. public let nextToken: String? @@ -1386,7 +1391,7 @@ extension SupplyChain { public struct SendDataIntegrationEventRequest: AWSEncodableShape { /// The idempotent client token. public let clientToken: String? - /// The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain . + /// The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain. public let data: String /// Event identifier (for example, orderId for InboundOrder) used for data sharing or partitioning. public let eventGroupId: String diff --git a/Sources/Soto/Services/Synthetics/Synthetics_api.swift b/Sources/Soto/Services/Synthetics/Synthetics_api.swift index cb61a7479e..0058ea13c6 100644 --- a/Sources/Soto/Services/Synthetics/Synthetics_api.swift +++ b/Sources/Soto/Services/Synthetics/Synthetics_api.swift @@ -79,6 +79,42 @@ public struct Synthetics: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "synthetics.af-south-1.api.aws", + "ap-east-1": "synthetics.ap-east-1.api.aws", + "ap-northeast-1": "synthetics.ap-northeast-1.api.aws", + "ap-northeast-2": "synthetics.ap-northeast-2.api.aws", + "ap-northeast-3": "synthetics.ap-northeast-3.api.aws", + "ap-south-1": "synthetics.ap-south-1.api.aws", + "ap-south-2": "synthetics.ap-south-2.api.aws", + "ap-southeast-1": "synthetics.ap-southeast-1.api.aws", + "ap-southeast-2": "synthetics.ap-southeast-2.api.aws", + "ap-southeast-3": "synthetics.ap-southeast-3.api.aws", + "ap-southeast-4": "synthetics.ap-southeast-4.api.aws", + "ap-southeast-5": "synthetics.ap-southeast-5.api.aws", + "ap-southeast-7": "synthetics.ap-southeast-7.api.aws", + "ca-central-1": "synthetics.ca-central-1.api.aws", + "ca-west-1": "synthetics.ca-west-1.api.aws", + "cn-north-1": "synthetics.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "synthetics.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "synthetics.eu-central-1.api.aws", + "eu-central-2": "synthetics.eu-central-2.api.aws", + "eu-north-1": "synthetics.eu-north-1.api.aws", + "eu-south-1": "synthetics.eu-south-1.api.aws", + "eu-south-2": "synthetics.eu-south-2.api.aws", + "eu-west-1": "synthetics.eu-west-1.api.aws", + "eu-west-2": "synthetics.eu-west-2.api.aws", + "eu-west-3": "synthetics.eu-west-3.api.aws", + "il-central-1": "synthetics.il-central-1.api.aws", + "me-central-1": "synthetics.me-central-1.api.aws", + "me-south-1": "synthetics.me-south-1.api.aws", + "mx-central-1": "synthetics.mx-central-1.api.aws", + "sa-east-1": 
"synthetics.sa-east-1.api.aws", + "us-east-1": "synthetics.us-east-1.api.aws", + "us-east-2": "synthetics.us-east-2.api.aws", + "us-west-1": "synthetics.us-west-1.api.aws", + "us-west-2": "synthetics.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ "ca-central-1": "synthetics-fips.ca-central-1.amazonaws.com", "ca-west-1": "synthetics-fips.ca-west-1.amazonaws.com", diff --git a/Sources/Soto/Services/Transcribe/Transcribe_api.swift b/Sources/Soto/Services/Transcribe/Transcribe_api.swift index 364055e3e9..d6694f627e 100644 --- a/Sources/Soto/Services/Transcribe/Transcribe_api.swift +++ b/Sources/Soto/Services/Transcribe/Transcribe_api.swift @@ -132,18 +132,21 @@ public struct Transcribe: AWSService { /// - categoryName: A unique name, chosen by you, for your Call Analytics category. It's helpful to use a detailed naming system that will make sense to you in the future. For example, it's better to use sentiment-positive-last30seconds for a category over a generic name like test-category. Category names are case sensitive. /// - inputType: Choose whether you want to create a real-time or a post-call category for your Call Analytics transcription. Specifying POST_CALL assigns your category to post-call transcriptions; categories with this input type cannot be applied to streaming (real-time) transcriptions. Specifying REAL_TIME assigns your category to streaming transcriptions; categories with this input type cannot be applied to post-call transcriptions. If you do not include InputType, your category is created as a post-call category by default. /// - rules: Rules define a Call Analytics category. When creating a new category, you must create between 1 and 20 rules for that category. For each rule, you specify a filter you want applied to the attributes of a call. For example, you can choose a sentiment filter that detects if a customer's sentiment was positive during the last 30 seconds of the call. + /// - tags: Adds one or more custom tags, each in the form of a key:value pair, to a new call analytics category at the time you start this new job. To learn more about using tags with Amazon Transcribe, refer to Tagging resources. /// - logger: Logger use during operation @inlinable public func createCallAnalyticsCategory( categoryName: String, inputType: InputType? = nil, rules: [Rule], + tags: [Tag]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateCallAnalyticsCategoryResponse { let input = CreateCallAnalyticsCategoryRequest( categoryName: categoryName, inputType: inputType, - rules: rules + rules: rules, + tags: tags ) return try await self.createCallAnalyticsCategory(input, logger: logger) } @@ -1222,6 +1225,7 @@ public struct Transcribe: AWSService { /// - outputEncryptionKMSKeyId: The KMS key you want to use to encrypt your Call Analytics output. If using a key located in the current Amazon Web Services account, you can specify your KMS key in one of four ways: Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Use an alias for the KMS key ID. For example, alias/ExampleAlias. Use the Amazon Resource Name (ARN) for the KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias. If using a key located in a different Amazon Web Services account than the current Amazon Web Services account, you can specify your KMS key in one of two ways: Use the ARN for the KMS key ID. 
For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias. If you do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3). If you specify a KMS key to encrypt your output, you must also specify an output location using the OutputLocation parameter. Note that the role making the request must have permission to use the specified KMS key. /// - outputLocation: The Amazon S3 location where you want your Call Analytics transcription output stored. You can use any of the following formats to specify the output location: s3://DOC-EXAMPLE-BUCKET s3://DOC-EXAMPLE-BUCKET/my-output-folder/ s3://DOC-EXAMPLE-BUCKET/my-output-folder/my-call-analytics-job.json Unless you specify a file name (option 3), the name of your output file has a default value that matches the name you specified for your transcription job using the CallAnalyticsJobName parameter. You can specify a KMS key to encrypt your output using the OutputEncryptionKMSKeyId parameter. If you do not specify a KMS key, Amazon Transcribe uses the default Amazon S3 key for server-side encryption. If you do not specify OutputLocation, your transcript is placed in a service-managed Amazon S3 bucket and you are provided with a URI to access your transcript. /// - settings: Specify additional optional settings in your request, including content redaction; allows you to apply custom language models, vocabulary filters, and custom vocabularies to your Call Analytics job. + /// - tags: Adds one or more custom tags, each in the form of a key:value pair, to a new call analytics job at the time you start this new job. To learn more about using tags with Amazon Transcribe, refer to Tagging resources. /// - logger: Logger use during operation @inlinable public func startCallAnalyticsJob( @@ -1232,6 +1236,7 @@ public struct Transcribe: AWSService { outputEncryptionKMSKeyId: String? = nil, outputLocation: String? = nil, settings: CallAnalyticsJobSettings? = nil, + tags: [Tag]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> StartCallAnalyticsJobResponse { let input = StartCallAnalyticsJobRequest( @@ -1241,7 +1246,8 @@ public struct Transcribe: AWSService { media: media, outputEncryptionKMSKeyId: outputEncryptionKMSKeyId, outputLocation: outputLocation, - settings: settings + settings: settings, + tags: tags ) return try await self.startCallAnalyticsJob(input, logger: logger) } diff --git a/Sources/Soto/Services/Transcribe/Transcribe_shapes.swift b/Sources/Soto/Services/Transcribe/Transcribe_shapes.swift index 6490bb2c90..54241108ee 100644 --- a/Sources/Soto/Services/Transcribe/Transcribe_shapes.swift +++ b/Sources/Soto/Services/Transcribe/Transcribe_shapes.swift @@ -454,10 +454,12 @@ extension Transcribe { public let settings: CallAnalyticsJobSettings? /// The date and time the specified Call Analytics job began processing. Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, 2022-05-04T12:32:58.789000-07:00 represents a transcription job that started processing at 12:32 PM UTC-7 on May 4, 2022. public let startTime: Date? + /// The tags, each in the form of a key:value pair, assigned to the specified call analytics job. + public let tags: [Tag]? public let transcript: Transcript? @inlinable - public init(callAnalyticsJobDetails: CallAnalyticsJobDetails? = nil, callAnalyticsJobName: String? = nil, callAnalyticsJobStatus: CallAnalyticsJobStatus? 
= nil, channelDefinitions: [ChannelDefinition]? = nil, completionTime: Date? = nil, creationTime: Date? = nil, dataAccessRoleArn: String? = nil, failureReason: String? = nil, identifiedLanguageScore: Float? = nil, languageCode: LanguageCode? = nil, media: Media? = nil, mediaFormat: MediaFormat? = nil, mediaSampleRateHertz: Int? = nil, settings: CallAnalyticsJobSettings? = nil, startTime: Date? = nil, transcript: Transcript? = nil) { + public init(callAnalyticsJobDetails: CallAnalyticsJobDetails? = nil, callAnalyticsJobName: String? = nil, callAnalyticsJobStatus: CallAnalyticsJobStatus? = nil, channelDefinitions: [ChannelDefinition]? = nil, completionTime: Date? = nil, creationTime: Date? = nil, dataAccessRoleArn: String? = nil, failureReason: String? = nil, identifiedLanguageScore: Float? = nil, languageCode: LanguageCode? = nil, media: Media? = nil, mediaFormat: MediaFormat? = nil, mediaSampleRateHertz: Int? = nil, settings: CallAnalyticsJobSettings? = nil, startTime: Date? = nil, tags: [Tag]? = nil, transcript: Transcript? = nil) { self.callAnalyticsJobDetails = callAnalyticsJobDetails self.callAnalyticsJobName = callAnalyticsJobName self.callAnalyticsJobStatus = callAnalyticsJobStatus @@ -473,6 +475,7 @@ extension Transcribe { self.mediaSampleRateHertz = mediaSampleRateHertz self.settings = settings self.startTime = startTime + self.tags = tags self.transcript = transcript } @@ -492,6 +495,7 @@ extension Transcribe { case mediaSampleRateHertz = "MediaSampleRateHertz" case settings = "Settings" case startTime = "StartTime" + case tags = "Tags" case transcript = "Transcript" } } @@ -646,14 +650,17 @@ extension Transcribe { public let lastUpdateTime: Date? /// The rules used to define a Call Analytics category. Each category can have between 1 and 20 rules. public let rules: [Rule]? + /// The tags, each in the form of a key:value pair, assigned to the specified call analytics category. + public let tags: [Tag]? @inlinable - public init(categoryName: String? = nil, createTime: Date? = nil, inputType: InputType? = nil, lastUpdateTime: Date? = nil, rules: [Rule]? = nil) { + public init(categoryName: String? = nil, createTime: Date? = nil, inputType: InputType? = nil, lastUpdateTime: Date? = nil, rules: [Rule]? = nil, tags: [Tag]? = nil) { self.categoryName = categoryName self.createTime = createTime self.inputType = inputType self.lastUpdateTime = lastUpdateTime self.rules = rules + self.tags = tags } private enum CodingKeys: String, CodingKey { @@ -662,6 +669,7 @@ extension Transcribe { case inputType = "InputType" case lastUpdateTime = "LastUpdateTime" case rules = "Rules" + case tags = "Tags" } } @@ -721,12 +729,15 @@ extension Transcribe { public let inputType: InputType? /// Rules define a Call Analytics category. When creating a new category, you must create between 1 and 20 rules for that category. For each rule, you specify a filter you want applied to the attributes of a call. For example, you can choose a sentiment filter that detects if a customer's sentiment was positive during the last 30 seconds of the call. public let rules: [Rule] + /// Adds one or more custom tags, each in the form of a key:value pair, to a new call analytics category at the time you start this new job. To learn more about using tags with Amazon Transcribe, refer to Tagging resources. + public let tags: [Tag]? @inlinable - public init(categoryName: String, inputType: InputType? = nil, rules: [Rule]) { + public init(categoryName: String, inputType: InputType? = nil, rules: [Rule], tags: [Tag]? 
= nil) { self.categoryName = categoryName self.inputType = inputType self.rules = rules + self.tags = tags } public func encode(to encoder: Encoder) throws { @@ -735,6 +746,7 @@ extension Transcribe { request.encodePath(self.categoryName, key: "CategoryName") try container.encodeIfPresent(self.inputType, forKey: .inputType) try container.encode(self.rules, forKey: .rules) + try container.encodeIfPresent(self.tags, forKey: .tags) } public func validate(name: String) throws { @@ -746,11 +758,17 @@ extension Transcribe { } try self.validate(self.rules, name: "rules", parent: name, max: 20) try self.validate(self.rules, name: "rules", parent: name, min: 1) + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.tags, name: "tags", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { case inputType = "InputType" case rules = "Rules" + case tags = "Tags" } } @@ -3088,9 +3106,11 @@ extension Transcribe { public let outputLocation: String? /// Specify additional optional settings in your request, including content redaction; allows you to apply custom language models, vocabulary filters, and custom vocabularies to your Call Analytics job. public let settings: CallAnalyticsJobSettings? + /// Adds one or more custom tags, each in the form of a key:value pair, to a new call analytics job at the time you start this new job. To learn more about using tags with Amazon Transcribe, refer to Tagging resources. + public let tags: [Tag]? @inlinable - public init(callAnalyticsJobName: String, channelDefinitions: [ChannelDefinition]? = nil, dataAccessRoleArn: String? = nil, media: Media, outputEncryptionKMSKeyId: String? = nil, outputLocation: String? = nil, settings: CallAnalyticsJobSettings? = nil) { + public init(callAnalyticsJobName: String, channelDefinitions: [ChannelDefinition]? = nil, dataAccessRoleArn: String? = nil, media: Media, outputEncryptionKMSKeyId: String? = nil, outputLocation: String? = nil, settings: CallAnalyticsJobSettings? = nil, tags: [Tag]? 
= nil) { self.callAnalyticsJobName = callAnalyticsJobName self.channelDefinitions = channelDefinitions self.dataAccessRoleArn = dataAccessRoleArn @@ -3098,6 +3118,7 @@ self.outputEncryptionKMSKeyId = outputEncryptionKMSKeyId self.outputLocation = outputLocation self.settings = settings + self.tags = tags } public func encode(to encoder: Encoder) throws { @@ -3110,6 +3131,7 @@ try container.encodeIfPresent(self.outputEncryptionKMSKeyId, forKey: .outputEncryptionKMSKeyId) try container.encodeIfPresent(self.outputLocation, forKey: .outputLocation) try container.encodeIfPresent(self.settings, forKey: .settings) + try container.encodeIfPresent(self.tags, forKey: .tags) } public func validate(name: String) throws { @@ -3132,6 +3154,11 @@ try self.validate(self.outputLocation, name: "outputLocation", parent: name, min: 1) try self.validate(self.outputLocation, name: "outputLocation", parent: name, pattern: "^(s3://|http(s*)://).+$") try self.settings?.validate(name: "\(name).settings") + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.tags, name: "tags", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { @@ -3141,6 +3168,7 @@ case outputEncryptionKMSKeyId = "OutputEncryptionKMSKeyId" case outputLocation = "OutputLocation" case settings = "Settings" + case tags = "Tags" } } diff --git a/Sources/Soto/Services/Transfer/Transfer_api.swift b/Sources/Soto/Services/Transfer/Transfer_api.swift index 2fb1c33cd4..b66c556e20 100644 --- a/Sources/Soto/Services/Transfer/Transfer_api.swift +++ b/Sources/Soto/Services/Transfer/Transfer_api.swift @@ -144,7 +144,7 @@ return try await self.createAccess(input, logger: logger) } - /// Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership, between a Transfer Family server and an AS2 process. The agreement defines the file and message transfer relationship between the server and the AS2 process. To define an agreement, Transfer Family combines a server, local profile, partner profile, certificate, and other attributes. The partner is identified with the PartnerProfileId, and the AS2 process is identified with the LocalProfileId. + /// Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership, between a Transfer Family server and an AS2 process. The agreement defines the file and message transfer relationship between the server and the AS2 process. To define an agreement, Transfer Family combines a server, local profile, partner profile, certificate, and other attributes. The partner is identified with the PartnerProfileId, and the AS2 process is identified with the LocalProfileId. Specify either BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail. @Sendable @inlinable public func createAgreement(_ input: CreateAgreementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAgreementResponse { @@ -157,14 +157,17 @@ logger: logger ) } - /// Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership, between a Transfer Family server and an AS2 process. The agreement defines the file and message transfer relationship between the server and the AS2 process.
To define an agreement, Transfer Family combines a server, local profile, partner profile, certificate, and other attributes. The partner is identified with the PartnerProfileId, and the AS2 process is identified with the LocalProfileId. + /// Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership, between a Transfer Family server and an AS2 process. The agreement defines the file and message transfer relationship between the server and the AS2 process. To define an agreement, Transfer Family combines a server, local profile, partner profile, certificate, and other attributes. The partner is identified with the PartnerProfileId, and the AS2 process is identified with the LocalProfileId. Specify either BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail. /// /// Parameters: /// - accessRole: Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use. For AS2 connectors With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer. If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key. For SFTP connectors Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager. /// - baseDirectory: The landing directory (folder) for files transferred by using the AS2 protocol. A BaseDirectory example is /amzn-s3-demo-bucket/home/mydirectory. + /// - customDirectories: A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files. Failed files MDN files Payload files Status files Temporary files /// - description: A name or short description to identify the agreement. + /// - enforceMessageSigning: Determines whether or not unsigned messages from your trading partners will be accepted. ENABLED: Transfer Family rejects unsigned messages from your trading partner. DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner. /// - localProfileId: A unique identifier for the AS2 local profile. /// - partnerProfileId: A unique identifier for the partner profile used in the agreement.
+ /// - preserveFilename: Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it. ENABLED: the filename provided by your trading partner is preserved when the file is saved. DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations. /// - serverId: A system-assigned unique identifier for a server instance. This is the specific server that the agreement uses. /// - status: The status of the agreement. The agreement can be either ACTIVE or INACTIVE. /// - tags: Key-value pairs that can be used to group and search for agreements. @@ -172,10 +175,13 @@ @inlinable public func createAgreement( accessRole: String, - baseDirectory: String, + baseDirectory: String? = nil, + customDirectories: CustomDirectoriesType? = nil, description: String? = nil, + enforceMessageSigning: EnforceMessageSigningType? = nil, localProfileId: String, partnerProfileId: String, + preserveFilename: PreserveFilenameType? = nil, serverId: String, status: AgreementStatusType? = nil, tags: [Tag]? = nil, @@ -184,9 +190,12 @@ let input = CreateAgreementRequest( accessRole: accessRole, baseDirectory: baseDirectory, + customDirectories: customDirectories, description: description, + enforceMessageSigning: enforceMessageSigning, localProfileId: localProfileId, partnerProfileId: partnerProfileId, + preserveFilename: preserveFilename, serverId: serverId, status: status, tags: tags @@ -2203,7 +2212,7 @@ - /// Updates some of the parameters for an existing agreement. Provide the AgreementId and the ServerId for the agreement that you want to update, along with the new values for the parameters to update. + /// Updates some of the parameters for an existing agreement. Provide the AgreementId and the ServerId for the agreement that you want to update, along with the new values for the parameters to update. Specify either BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail. If you update an agreement from using base directory to custom directories, the base directory is no longer used. Similarly, if you change from custom directories to a base directory, the custom directories are no longer used. @Sendable @inlinable public func updateAgreement(_ input: UpdateAgreementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateAgreementResponse { @@ -2216,15 +2225,18 @@ logger: logger ) } - /// Updates some of the parameters for an existing agreement. Provide the AgreementId and the ServerId for the agreement that you want to update, along with the new values for the parameters to update. + /// Updates some of the parameters for an existing agreement. Provide the AgreementId and the ServerId for the agreement that you want to update, along with the new values for the parameters to update. Specify either BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail. If you update an agreement from using base directory to custom directories, the base directory is no longer used. Similarly, if you change from custom directories to a base directory, the custom directories are no longer used.
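// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the generated diff): switching an
// existing agreement from BaseDirectory to the new CustomDirectories layout.
// BaseDirectory and CustomDirectories are mutually exclusive, so only
// CustomDirectories is supplied; once set, any previously configured
// BaseDirectory is no longer used. All identifiers and bucket paths below are
// hypothetical examples, and `client` is assumed to be an existing AWSClient.
// ---------------------------------------------------------------------------
import SotoTransfer

func adoptCustomDirectories(client: AWSClient) async throws {
    let transfer = Transfer(client: client, region: .useast1)
    let directories = Transfer.CustomDirectoriesType(
        failedFilesDirectory: "/amzn-s3-demo-bucket/as2/failed",
        mdnFilesDirectory: "/amzn-s3-demo-bucket/as2/mdn",
        payloadFilesDirectory: "/amzn-s3-demo-bucket/as2/payload",
        statusFilesDirectory: "/amzn-s3-demo-bucket/as2/status",
        temporaryFilesDirectory: "/amzn-s3-demo-bucket/as2/tmp"
    )
    _ = try await transfer.updateAgreement(
        agreementId: "a-1234567890abcdef0",  // hypothetical agreement ID
        customDirectories: directories,      // baseDirectory is left nil
        serverId: "s-1234567890abcdef0"      // hypothetical server ID
    )
}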
/// /// Parameters: /// - accessRole: Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use. For AS2 connectors With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer. If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key. For SFTP connectors Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager. /// - agreementId: A unique identifier for the agreement. This identifier is returned when you create an agreement. /// - baseDirectory: To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /amzn-s3-demo-bucket/home/mydirectory . + /// - customDirectories: A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files. Failed files MDN files Payload files Status files Temporary files /// - description: To replace the existing description, provide a short description for the agreement. + /// - enforceMessageSigning: Determines whether or not unsigned messages from your trading partners will be accepted. ENABLED: Transfer Family rejects unsigned messages from your trading partner. DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner. /// - localProfileId: A unique identifier for the AS2 local profile. To change the local profile identifier, provide a new value here. /// - partnerProfileId: A unique identifier for the partner profile. To change the partner profile identifier, provide a new value here. + /// - preserveFilename: Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it. ENABLED: the filename provided by your trading partner is preserved when the file is saved. DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations. /// - serverId: A system-assigned unique identifier for a server instance. This is the specific server that the agreement uses. /// - status: You can update the status for the agreement, either activating an inactive agreement or the reverse.
/// - logger: Logger use during operation @@ -2233,9 +2245,12 @@ public struct Transfer: AWSService { accessRole: String? = nil, agreementId: String, baseDirectory: String? = nil, + customDirectories: CustomDirectoriesType? = nil, description: String? = nil, + enforceMessageSigning: EnforceMessageSigningType? = nil, localProfileId: String? = nil, partnerProfileId: String? = nil, + preserveFilename: PreserveFilenameType? = nil, serverId: String, status: AgreementStatusType? = nil, logger: Logger = AWSClient.loggingDisabled @@ -2244,9 +2259,12 @@ public struct Transfer: AWSService { accessRole: accessRole, agreementId: agreementId, baseDirectory: baseDirectory, + customDirectories: customDirectories, description: description, + enforceMessageSigning: enforceMessageSigning, localProfileId: localProfileId, partnerProfileId: partnerProfileId, + preserveFilename: preserveFilename, serverId: serverId, status: status ) diff --git a/Sources/Soto/Services/Transfer/Transfer_shapes.swift b/Sources/Soto/Services/Transfer/Transfer_shapes.swift index 384dc042a4..0a7bef9eb3 100644 --- a/Sources/Soto/Services/Transfer/Transfer_shapes.swift +++ b/Sources/Soto/Services/Transfer/Transfer_shapes.swift @@ -102,6 +102,12 @@ extension Transfer { public var description: String { return self.rawValue } } + public enum EnforceMessageSigningType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum ExecutionErrorType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case alreadyExists = "ALREADY_EXISTS" case badRequest = "BAD_REQUEST" @@ -164,6 +170,18 @@ extension Transfer { public var description: String { return self.rawValue } } + public enum PreserveContentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + + public enum PreserveFilenameType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum ProfileType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case local = "LOCAL" case partner = "PARTNER" @@ -266,11 +284,13 @@ extension Transfer { public let messageSubject: String? /// A unique identifier for the partner profile for the connector. public let partnerProfileId: String? + /// Allows you to use the Amazon S3 Content-Type that is associated with objects in S3 instead of having the content type mapped based on the file extension. This parameter is enabled by default when you create an AS2 connector from the console, but disabled by default when you create an AS2 connector by calling the API directly. + public let preserveContentType: PreserveContentType? /// The algorithm that is used to sign the AS2 messages sent with the connector. public let signingAlgorithm: SigningAlg? @inlinable - public init(basicAuthSecretId: String? = nil, compression: CompressionEnum? = nil, encryptionAlgorithm: EncryptionAlg? = nil, localProfileId: String? = nil, mdnResponse: MdnResponse? = nil, mdnSigningAlgorithm: MdnSigningAlg? = nil, messageSubject: String? = nil, partnerProfileId: String? = nil, signingAlgorithm: SigningAlg? = nil) { + public init(basicAuthSecretId: String? = nil, compression: CompressionEnum? 
= nil, encryptionAlgorithm: EncryptionAlg? = nil, localProfileId: String? = nil, mdnResponse: MdnResponse? = nil, mdnSigningAlgorithm: MdnSigningAlg? = nil, messageSubject: String? = nil, partnerProfileId: String? = nil, preserveContentType: PreserveContentType? = nil, signingAlgorithm: SigningAlg? = nil) { self.basicAuthSecretId = basicAuthSecretId self.compression = compression self.encryptionAlgorithm = encryptionAlgorithm @@ -279,6 +299,7 @@ extension Transfer { self.mdnSigningAlgorithm = mdnSigningAlgorithm self.messageSubject = messageSubject self.partnerProfileId = partnerProfileId + self.preserveContentType = preserveContentType self.signingAlgorithm = signingAlgorithm } @@ -304,6 +325,7 @@ extension Transfer { case mdnSigningAlgorithm = "MdnSigningAlgorithm" case messageSubject = "MessageSubject" case partnerProfileId = "PartnerProfileId" + case preserveContentType = "PreserveContentType" case signingAlgorithm = "SigningAlgorithm" } } @@ -452,13 +474,19 @@ extension Transfer { /// Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use. For AS2 connectors With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer. If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key. For SFTP connectors Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager. public let accessRole: String /// The landing directory (folder) for files transferred by using the AS2 protocol. A BaseDirectory example is /amzn-s3-demo-bucket/home/mydirectory. - public let baseDirectory: String + public let baseDirectory: String? + /// A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files. Failed files MDN files Payload files Status files Temporary files + public let customDirectories: CustomDirectoriesType? /// A name or short description to identify the agreement. public let description: String? + /// Determines whether or not unsigned messages from your trading partners will be accepted. ENABLED: Transfer Family rejects unsigned messages from your trading partner. DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner. + public let enforceMessageSigning: EnforceMessageSigningType? 
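// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the generated diff): creating an
// agreement with the two new options declared above. The role ARN and the
// profile/server identifiers are hypothetical examples; `transfer` is assumed
// to be a configured Transfer service object.
// ---------------------------------------------------------------------------
func createSignedAgreement(transfer: Transfer) async throws {
    let request = Transfer.CreateAgreementRequest(
        accessRole: "arn:aws:iam::111122223333:role/as2-access",  // hypothetical
        baseDirectory: "/amzn-s3-demo-bucket/home/mydirectory",
        enforceMessageSigning: .enabled,  // reject unsigned messages from the partner
        localProfileId: "p-1234567890abcdef0",
        partnerProfileId: "p-abcdef0123456789a",
        preserveFilename: .enabled,       // keep the partner-supplied filename
        serverId: "s-1234567890abcdef0"
    )
    _ = try await transfer.createAgreement(request)
}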
/// A unique identifier for the AS2 local profile. public let localProfileId: String /// A unique identifier for the partner profile used in the agreement. public let partnerProfileId: String + /// Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it. ENABLED: the filename provided by your trading partner is preserved when the file is saved. DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations. + public let preserveFilename: PreserveFilenameType? /// A system-assigned unique identifier for a server instance. This is the specific server that the agreement uses. public let serverId: String /// The status of the agreement. The agreement can be either ACTIVE or INACTIVE. @@ -467,12 +495,15 @@ public let tags: [Tag]? @inlinable - public init(accessRole: String, baseDirectory: String, description: String? = nil, localProfileId: String, partnerProfileId: String, serverId: String, status: AgreementStatusType? = nil, tags: [Tag]? = nil) { + public init(accessRole: String, baseDirectory: String? = nil, customDirectories: CustomDirectoriesType? = nil, description: String? = nil, enforceMessageSigning: EnforceMessageSigningType? = nil, localProfileId: String, partnerProfileId: String, preserveFilename: PreserveFilenameType? = nil, serverId: String, status: AgreementStatusType? = nil, tags: [Tag]? = nil) { self.accessRole = accessRole self.baseDirectory = baseDirectory + self.customDirectories = customDirectories self.description = description + self.enforceMessageSigning = enforceMessageSigning self.localProfileId = localProfileId self.partnerProfileId = partnerProfileId + self.preserveFilename = preserveFilename self.serverId = serverId self.status = status self.tags = tags @@ -484,6 +515,7 @@ try self.validate(self.accessRole, name: "accessRole", parent: name, pattern: "^arn:.*role/\\S+$") try self.validate(self.baseDirectory, name: "baseDirectory", parent: name, max: 1024) try self.validate(self.baseDirectory, name: "baseDirectory", parent: name, pattern: "^(|/.*)$") + try self.customDirectories?.validate(name: "\(name).customDirectories") try self.validate(self.description, name: "description", parent: name, max: 200) try self.validate(self.description, name: "description", parent: name, min: 1) try self.validate(self.description, name: "description", parent: name, pattern: "^[\\p{Graph}]+$") @@ -506,9 +538,12 @@ private enum CodingKeys: String, CodingKey { case accessRole = "AccessRole" case baseDirectory = "BaseDirectory" + case customDirectories = "CustomDirectories" case description = "Description" + case enforceMessageSigning = "EnforceMessageSigning" case localProfileId = "LocalProfileId" case partnerProfileId = "PartnerProfileId" + case preserveFilename = "PreserveFilename" case serverId = "ServerId" case status = "Status" case tags = "Tags" @@ -984,6 +1019,49 @@ } } + public struct CustomDirectoriesType: AWSEncodableShape & AWSDecodableShape { + /// Specifies a location to store failed AS2 message files. + public let failedFilesDirectory: String + /// Specifies a location to store MDN files. + public let mdnFilesDirectory: String + /// Specifies a location to store the payload for AS2 message files. + public let payloadFilesDirectory: String + /// Specifies a location to store AS2 status messages.
+ public let statusFilesDirectory: String + /// Specifies a location to store temporary AS2 message files. + public let temporaryFilesDirectory: String + + @inlinable + public init(failedFilesDirectory: String, mdnFilesDirectory: String, payloadFilesDirectory: String, statusFilesDirectory: String, temporaryFilesDirectory: String) { + self.failedFilesDirectory = failedFilesDirectory + self.mdnFilesDirectory = mdnFilesDirectory + self.payloadFilesDirectory = payloadFilesDirectory + self.statusFilesDirectory = statusFilesDirectory + self.temporaryFilesDirectory = temporaryFilesDirectory + } + + public func validate(name: String) throws { + try self.validate(self.failedFilesDirectory, name: "failedFilesDirectory", parent: name, max: 1024) + try self.validate(self.failedFilesDirectory, name: "failedFilesDirectory", parent: name, pattern: "^(|/.*)$") + try self.validate(self.mdnFilesDirectory, name: "mdnFilesDirectory", parent: name, max: 1024) + try self.validate(self.mdnFilesDirectory, name: "mdnFilesDirectory", parent: name, pattern: "^(|/.*)$") + try self.validate(self.payloadFilesDirectory, name: "payloadFilesDirectory", parent: name, max: 1024) + try self.validate(self.payloadFilesDirectory, name: "payloadFilesDirectory", parent: name, pattern: "^(|/.*)$") + try self.validate(self.statusFilesDirectory, name: "statusFilesDirectory", parent: name, max: 1024) + try self.validate(self.statusFilesDirectory, name: "statusFilesDirectory", parent: name, pattern: "^(|/.*)$") + try self.validate(self.temporaryFilesDirectory, name: "temporaryFilesDirectory", parent: name, max: 1024) + try self.validate(self.temporaryFilesDirectory, name: "temporaryFilesDirectory", parent: name, pattern: "^(|/.*)$") + } + + private enum CodingKeys: String, CodingKey { + case failedFilesDirectory = "FailedFilesDirectory" + case mdnFilesDirectory = "MdnFilesDirectory" + case payloadFilesDirectory = "PayloadFilesDirectory" + case statusFilesDirectory = "StatusFilesDirectory" + case temporaryFilesDirectory = "TemporaryFilesDirectory" + } + } + public struct CustomStepDetails: AWSEncodableShape & AWSDecodableShape { /// The name of the step, used as an identifier. public let name: String? @@ -1900,12 +1978,18 @@ extension Transfer { public let arn: String /// The landing directory (folder) for files that are transferred by using the AS2 protocol. public let baseDirectory: String? + /// A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files. Failed files MDN files Payload files Status files Temporary files + public let customDirectories: CustomDirectoriesType? /// The name or short description that's used to identify the agreement. public let description: String? + /// Determines whether or not unsigned messages from your trading partners will be accepted. ENABLED: Transfer Family rejects unsigned messages from your trading partner. DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner. + public let enforceMessageSigning: EnforceMessageSigningType? /// A unique identifier for the AS2 local profile. public let localProfileId: String? /// A unique identifier for the partner profile used in the agreement. public let partnerProfileId: String? + /// Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it. 
ENABLED: the filename provided by your trading partner is preserved when the file is saved. DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations. + public let preserveFilename: PreserveFilenameType? /// A system-assigned unique identifier for a server instance. This identifier indicates the specific server that the agreement uses. public let serverId: String? /// The current status of the agreement, either ACTIVE or INACTIVE. @@ -1914,14 +1998,17 @@ public let tags: [Tag]? @inlinable - public init(accessRole: String? = nil, agreementId: String? = nil, arn: String, baseDirectory: String? = nil, description: String? = nil, localProfileId: String? = nil, partnerProfileId: String? = nil, serverId: String? = nil, status: AgreementStatusType? = nil, tags: [Tag]? = nil) { + public init(accessRole: String? = nil, agreementId: String? = nil, arn: String, baseDirectory: String? = nil, customDirectories: CustomDirectoriesType? = nil, description: String? = nil, enforceMessageSigning: EnforceMessageSigningType? = nil, localProfileId: String? = nil, partnerProfileId: String? = nil, preserveFilename: PreserveFilenameType? = nil, serverId: String? = nil, status: AgreementStatusType? = nil, tags: [Tag]? = nil) { self.accessRole = accessRole self.agreementId = agreementId self.arn = arn self.baseDirectory = baseDirectory + self.customDirectories = customDirectories self.description = description + self.enforceMessageSigning = enforceMessageSigning self.localProfileId = localProfileId self.partnerProfileId = partnerProfileId + self.preserveFilename = preserveFilename self.serverId = serverId self.status = status self.tags = tags @@ -1932,9 +2019,12 @@ case agreementId = "AgreementId" case arn = "Arn" case baseDirectory = "BaseDirectory" + case customDirectories = "CustomDirectories" case description = "Description" + case enforceMessageSigning = "EnforceMessageSigning" case localProfileId = "LocalProfileId" case partnerProfileId = "PartnerProfileId" + case preserveFilename = "PreserveFilename" case serverId = "ServerId" case status = "Status" case tags = "Tags" @@ -1962,7 +2052,7 @@ public let notBeforeDate: Date? /// The serial number for the certificate. public let serial: String? - /// The certificate can be either ACTIVE, PENDING_ROTATION, or INACTIVE. PENDING_ROTATION means that this certificate will replace the current certificate when it expires. + /// Currently, the only available status is ACTIVE: all other values are reserved for future use. public let status: CertificateStatusType? /// Key-value pairs that can be used to group and search for certificates. public let tags: [Tag]? @@ -4725,25 +4815,34 @@ public let agreementId: String /// To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /amzn-s3-demo-bucket/home/mydirectory . public let baseDirectory: String? + /// A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files. Failed files MDN files Payload files Status files Temporary files + public let customDirectories: CustomDirectoriesType? /// To replace the existing description, provide a short description for the agreement. public let description: String?
+ /// Determines whether or not unsigned messages from your trading partners will be accepted. ENABLED: Transfer Family rejects unsigned messages from your trading partner. DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner. + public let enforceMessageSigning: EnforceMessageSigningType? /// A unique identifier for the AS2 local profile. To change the local profile identifier, provide a new value here. public let localProfileId: String? /// A unique identifier for the partner profile. To change the partner profile identifier, provide a new value here. public let partnerProfileId: String? + /// Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it. ENABLED: the filename provided by your trading partner is preserved when the file is saved. DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations. + public let preserveFilename: PreserveFilenameType? /// A system-assigned unique identifier for a server instance. This is the specific server that the agreement uses. public let serverId: String /// You can update the status for the agreement, either activating an inactive agreement or the reverse. public let status: AgreementStatusType? @inlinable - public init(accessRole: String? = nil, agreementId: String, baseDirectory: String? = nil, description: String? = nil, localProfileId: String? = nil, partnerProfileId: String? = nil, serverId: String, status: AgreementStatusType? = nil) { + public init(accessRole: String? = nil, agreementId: String, baseDirectory: String? = nil, customDirectories: CustomDirectoriesType? = nil, description: String? = nil, enforceMessageSigning: EnforceMessageSigningType? = nil, localProfileId: String? = nil, partnerProfileId: String? = nil, preserveFilename: PreserveFilenameType? = nil, serverId: String, status: AgreementStatusType?
= nil) { self.accessRole = accessRole self.agreementId = agreementId self.baseDirectory = baseDirectory + self.customDirectories = customDirectories self.description = description + self.enforceMessageSigning = enforceMessageSigning self.localProfileId = localProfileId self.partnerProfileId = partnerProfileId + self.preserveFilename = preserveFilename self.serverId = serverId self.status = status } @@ -4757,6 +4856,7 @@ extension Transfer { try self.validate(self.agreementId, name: "agreementId", parent: name, pattern: "^a-([0-9a-f]{17})$") try self.validate(self.baseDirectory, name: "baseDirectory", parent: name, max: 1024) try self.validate(self.baseDirectory, name: "baseDirectory", parent: name, pattern: "^(|/.*)$") + try self.customDirectories?.validate(name: "\(name).customDirectories") try self.validate(self.description, name: "description", parent: name, max: 200) try self.validate(self.description, name: "description", parent: name, min: 1) try self.validate(self.description, name: "description", parent: name, pattern: "^[\\p{Graph}]+$") @@ -4775,9 +4875,12 @@ extension Transfer { case accessRole = "AccessRole" case agreementId = "AgreementId" case baseDirectory = "BaseDirectory" + case customDirectories = "CustomDirectories" case description = "Description" + case enforceMessageSigning = "EnforceMessageSigning" case localProfileId = "LocalProfileId" case partnerProfileId = "PartnerProfileId" + case preserveFilename = "PreserveFilename" case serverId = "ServerId" case status = "Status" } diff --git a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift index 9130888b89..2b41074928 100644 --- a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift +++ b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift @@ -26,6 +26,32 @@ import Foundation extension WorkSpaces { // MARK: Enums + public enum AGAModeForDirectoryEnum: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabledAuto = "ENABLED_AUTO" + public var description: String { return self.rawValue } + } + + public enum AGAModeForWorkSpaceEnum: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabledAuto = "ENABLED_AUTO" + case inherited = "INHERITED" + public var description: String { return self.rawValue } + } + + public enum AGAPreferredProtocolForDirectory: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case none = "NONE" + case tcp = "TCP" + public var description: String { return self.rawValue } + } + + public enum AGAPreferredProtocolForWorkSpace: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case inherited = "INHERITED" + case none = "NONE" + case tcp = "TCP" + public var description: String { return self.rawValue } + } + public enum AccessPropertyValue: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case allow = "ALLOW" case deny = "DENY" @@ -124,6 +150,8 @@ extension WorkSpaces { } public enum Compute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case generalpurpose4Xlarge = "GENERALPURPOSE_4XLARGE" + case generalpurpose8Xlarge = "GENERALPURPOSE_8XLARGE" case graphics = "GRAPHICS" case graphicsG4Dn = "GRAPHICS_G4DN" case graphicspro = "GRAPHICSPRO" @@ -3596,6 +3624,42 @@ extension WorkSpaces { } } + public struct GlobalAcceleratorForDirectory: AWSEncodableShape & AWSDecodableShape { + /// Indicates if 
Global Accelerator for directory is enabled or disabled. + public let mode: AGAModeForDirectoryEnum + /// Indicates the preferred protocol for Global Accelerator. + public let preferredProtocol: AGAPreferredProtocolForDirectory? + + @inlinable + public init(mode: AGAModeForDirectoryEnum, preferredProtocol: AGAPreferredProtocolForDirectory? = nil) { + self.mode = mode + self.preferredProtocol = preferredProtocol + } + + private enum CodingKeys: String, CodingKey { + case mode = "Mode" + case preferredProtocol = "PreferredProtocol" + } + } + + public struct GlobalAcceleratorForWorkSpace: AWSEncodableShape & AWSDecodableShape { + /// Indicates if Global Accelerator for WorkSpaces is enabled, disabled, or the same mode as the associated directory. + public let mode: AGAModeForWorkSpaceEnum + /// Indicates the preferred protocol for Global Accelerator. + public let preferredProtocol: AGAPreferredProtocolForWorkSpace? + + @inlinable + public init(mode: AGAModeForWorkSpaceEnum, preferredProtocol: AGAPreferredProtocolForWorkSpace? = nil) { + self.mode = mode + self.preferredProtocol = preferredProtocol + } + + private enum CodingKeys: String, CodingKey { + case mode = "Mode" + case preferredProtocol = "PreferredProtocol" + } + } + public struct IDCConfig: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the application. public let applicationArn: String? @@ -5098,6 +5162,8 @@ extension WorkSpaces { } public struct StreamingProperties: AWSEncodableShape & AWSDecodableShape { + /// Indicates the Global Accelerator properties. + public let globalAccelerator: GlobalAcceleratorForDirectory? /// Indicates the storage connector used public let storageConnectors: [StorageConnector]? /// Indicates the type of preferred protocol for the streaming experience. @@ -5106,7 +5172,8 @@ extension WorkSpaces { public let userSettings: [UserSetting]? @inlinable - public init(storageConnectors: [StorageConnector]? = nil, streamingExperiencePreferredProtocol: StreamingExperiencePreferredProtocolEnum? = nil, userSettings: [UserSetting]? = nil) { + public init(globalAccelerator: GlobalAcceleratorForDirectory? = nil, storageConnectors: [StorageConnector]? = nil, streamingExperiencePreferredProtocol: StreamingExperiencePreferredProtocolEnum? = nil, userSettings: [UserSetting]? = nil) { + self.globalAccelerator = globalAccelerator self.storageConnectors = storageConnectors self.streamingExperiencePreferredProtocol = streamingExperiencePreferredProtocol self.userSettings = userSettings @@ -5121,6 +5188,7 @@ extension WorkSpaces { } private enum CodingKeys: String, CodingKey { + case globalAccelerator = "GlobalAccelerator" case storageConnectors = "StorageConnectors" case streamingExperiencePreferredProtocol = "StreamingExperiencePreferredProtocol" case userSettings = "UserSettings" @@ -6064,6 +6132,8 @@ extension WorkSpaces { public struct WorkspaceProperties: AWSEncodableShape & AWSDecodableShape { /// The compute type. For more information, see Amazon WorkSpaces Bundles. public let computeTypeName: Compute? + /// Indicates the Global Accelerator properties. + public let globalAccelerator: GlobalAcceleratorForWorkSpace? /// The name of the operating system. public let operatingSystemName: OperatingSystemName? /// The protocol. For more information, see Protocols for Amazon WorkSpaces. Only available for WorkSpaces created with PCoIP bundles. The Protocols property is case sensitive. Ensure you use PCOIP or DCV (formerly WSP). 
Unavailable for Windows 7 WorkSpaces and WorkSpaces using GPU-based bundles (Graphics, GraphicsPro, Graphics.g4dn, and GraphicsPro.g4dn). @@ -6078,8 +6148,9 @@ extension WorkSpaces { public let userVolumeSizeGib: Int? @inlinable - public init(computeTypeName: Compute? = nil, operatingSystemName: OperatingSystemName? = nil, protocols: [`Protocol`]? = nil, rootVolumeSizeGib: Int? = nil, runningMode: RunningMode? = nil, runningModeAutoStopTimeoutInMinutes: Int? = nil, userVolumeSizeGib: Int? = nil) { + public init(computeTypeName: Compute? = nil, globalAccelerator: GlobalAcceleratorForWorkSpace? = nil, operatingSystemName: OperatingSystemName? = nil, protocols: [`Protocol`]? = nil, rootVolumeSizeGib: Int? = nil, runningMode: RunningMode? = nil, runningModeAutoStopTimeoutInMinutes: Int? = nil, userVolumeSizeGib: Int? = nil) { self.computeTypeName = computeTypeName + self.globalAccelerator = globalAccelerator self.operatingSystemName = operatingSystemName self.protocols = protocols self.rootVolumeSizeGib = rootVolumeSizeGib @@ -6090,6 +6161,7 @@ extension WorkSpaces { private enum CodingKeys: String, CodingKey { case computeTypeName = "ComputeTypeName" + case globalAccelerator = "GlobalAccelerator" case operatingSystemName = "OperatingSystemName" case protocols = "Protocols" case rootVolumeSizeGib = "RootVolumeSizeGib" diff --git a/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_api.swift b/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_api.swift index e5b24bce77..13661eb903 100644 --- a/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_api.swift +++ b/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_api.swift @@ -98,7 +98,7 @@ public struct WorkSpacesThinClient: AWSService { /// Parameters: /// - clientToken: Specifies a unique, case-sensitive identifier that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value. If you don't provide this value, then Amazon Web Services generates a random one for you. If you retry the operation with the same ClientToken, but with different parameters, the retry fails with an IdempotentParameterMismatch error. /// - desiredSoftwareSetId: The ID of the software set to apply. - /// - desktopArn: The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0. + /// - desktopArn: The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Secure Browser, or AppStream 2.0. /// - desktopEndpoint: The URL for the identity provider login (only for environments that use AppStream 2.0). /// - deviceCreationTags: A map of the key-value pairs of the tag or tags to assign to the newly created devices for this environment. /// - kmsKeyArn: The Amazon Resource Name (ARN) of the Key Management Service key to use to encrypt the environment. @@ -583,7 +583,7 @@ public struct WorkSpacesThinClient: AWSService { /// /// Parameters: /// - desiredSoftwareSetId: The ID of the software set to apply. - /// - desktopArn: The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0. + /// - desktopArn: The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Secure Browser, or AppStream 2.0. 
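// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the generated diff): constructing
// WorkspaceProperties that opt a single WorkSpace into Global Accelerator
// while inheriting the directory-level preferred protocol. These properties
// would typically be sent in a ModifyWorkspaceProperties request; everything
// shown uses only the shapes introduced above.
// ---------------------------------------------------------------------------
import SotoWorkSpaces

let globalAcceleratorProperties = WorkSpaces.WorkspaceProperties(
    globalAccelerator: WorkSpaces.GlobalAcceleratorForWorkSpace(
        mode: .enabledAuto,            // enable Global Accelerator for this WorkSpace
        preferredProtocol: .inherited  // fall back to the directory-level setting
    )
)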
/// - desktopEndpoint: The URL for the identity provider login (only for environments that use AppStream 2.0). /// - deviceCreationTags: A map of the key-value pairs of the tag or tags to assign to the newly created devices for this environment. /// - id: The ID of the environment to update. diff --git a/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift b/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift index 1f763dbef8..90a7b7fcc0 100644 --- a/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift +++ b/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift @@ -116,7 +116,7 @@ extension WorkSpacesThinClient { public let clientToken: String? /// The ID of the software set to apply. public let desiredSoftwareSetId: String? - /// The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0. + /// The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Secure Browser, or AppStream 2.0. public let desktopArn: String /// The URL for the identity provider login (only for environments that use AppStream 2.0). public let desktopEndpoint: String? @@ -480,7 +480,7 @@ extension WorkSpacesThinClient { public let createdAt: Date? /// The ID of the software set to apply. public let desiredSoftwareSetId: String? - /// The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0. + /// The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Secure Browser, or AppStream 2.0. public let desktopArn: String? /// The URL for the identity provider login (only for environments that use AppStream 2.0). public let desktopEndpoint: String? @@ -570,7 +570,7 @@ extension WorkSpacesThinClient { public let createdAt: Date? /// The ID of the software set to apply. public let desiredSoftwareSetId: String? - /// The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0. + /// The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Secure Browser, or AppStream 2.0. public let desktopArn: String? /// The URL for the identity provider login (only for environments that use AppStream 2.0). public let desktopEndpoint: String? @@ -922,10 +922,10 @@ extension WorkSpacesThinClient { /// The minutes past the hour for the maintenance window start (00-59). public let startTimeMinute: Int? /// An option to select the default or custom maintenance window. - public let type: MaintenanceWindowType? + public let type: MaintenanceWindowType @inlinable - public init(applyTimeOf: ApplyTimeOf? = nil, daysOfTheWeek: [DayOfWeek]? = nil, endTimeHour: Int? = nil, endTimeMinute: Int? = nil, startTimeHour: Int? = nil, startTimeMinute: Int? = nil, type: MaintenanceWindowType? = nil) { + public init(applyTimeOf: ApplyTimeOf? = nil, daysOfTheWeek: [DayOfWeek]? = nil, endTimeHour: Int? = nil, endTimeMinute: Int? = nil, startTimeHour: Int? = nil, startTimeMinute: Int? = nil, type: MaintenanceWindowType) { self.applyTimeOf = applyTimeOf self.daysOfTheWeek = daysOfTheWeek self.endTimeHour = endTimeHour @@ -1164,7 +1164,7 @@ extension WorkSpacesThinClient { public struct UpdateEnvironmentRequest: AWSEncodableShape { /// The ID of the software set to apply. public let desiredSoftwareSetId: String? 
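// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the generated diff): repointing a
// thin-client environment at a WorkSpaces Secure Browser portal via its ARN.
// The environment ID and portal ARN are hypothetical, and the convenience
// call shown assumes the updateEnvironment signature documented above.
// ---------------------------------------------------------------------------
import SotoWorkSpacesThinClient

func repointEnvironment(thinClient: WorkSpacesThinClient) async throws {
    _ = try await thinClient.updateEnvironment(
        desktopArn: "arn:aws:workspaces-web:us-east-1:111122223333:portal/example",
        id: "wte-0123456789abcdef0"  // hypothetical environment ID
    )
}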
- /// The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0. + /// The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Secure Browser, or AppStream 2.0. public let desktopArn: String? /// The URL for the identity provider login (only for environments that use AppStream 2.0). public let desktopEndpoint: String? diff --git a/models/amplify.json b/models/amplify.json index 45ba331ec0..61be8fba94 100644 --- a/models/amplify.json +++ b/models/amplify.json @@ -1100,14 +1100,14 @@ "createTime": { "target": "com.amazonaws.amplify#CreateTime", "traits": { - "smithy.api#documentation": "

Creates a date and time for the Amplify app.", + "smithy.api#documentation": "
A timestamp of when Amplify created the application.", "smithy.api#required": {} } }, "updateTime": { "target": "com.amazonaws.amplify#UpdateTime", "traits": { - "smithy.api#documentation": "
Updates the date and time for the Amplify app.", + "smithy.api#documentation": "
A timestamp of when Amplify updated the application.", "smithy.api#required": {} } }, @@ -1210,6 +1210,18 @@ "traits": { "smithy.api#documentation": "
The cache configuration for the Amplify app. If you don't specify the\n cache configuration type, Amplify uses the default\n AMPLIFY_MANAGED setting." } + }, + "webhookCreateTime": { + "target": "com.amazonaws.amplify#webhookCreateTime", + "traits": { + "smithy.api#documentation": "
A timestamp of when Amplify created the webhook in your Git repository." } + }, + "wafConfiguration": { + "target": "com.amazonaws.amplify#WafConfiguration", + "traits": { + "smithy.api#documentation": "
Describes the Firewall configuration for the Amplify app. Firewall support enables you to protect your hosted applications with a direct integration\n with WAF." } + } }, "traits": { @@ -1587,14 +1599,14 @@ "createTime": { "target": "com.amazonaws.amplify#CreateTime", "traits": { - "smithy.api#documentation": "
The creation date and time for a branch that is part of an Amplify app.", + "smithy.api#documentation": "
A timestamp of when Amplify created the branch.", "smithy.api#required": {} } }, "updateTime": { "target": "com.amazonaws.amplify#UpdateTime", "traits": { - "smithy.api#documentation": "
The last updated date and time for a branch that is part of an Amplify app.", + "smithy.api#documentation": "
A timestamp for the last updated time for a branch.", "smithy.api#required": {} } }, @@ -4169,6 +4181,12 @@ "com.amazonaws.amplify#JobStatus": { "type": "enum", "members": { + "CREATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATED" + } + }, "PENDING": { "target": "smithy.api#Unit", "traits": { @@ -4253,7 +4271,7 @@ "commitTime": { "target": "com.amazonaws.amplify#CommitTime", "traits": { - "smithy.api#documentation": "
The commit date and time for the job.", + "smithy.api#documentation": "
The commit date and time for the job.", "smithy.api#required": {} } }, @@ -6686,6 +6704,77 @@ "com.amazonaws.amplify#Verified": { "type": "boolean" }, + "com.amazonaws.amplify#WafConfiguration": { + "type": "structure", + "members": { + "webAclArn": { + "target": "com.amazonaws.amplify#WebAclArn", + "traits": { + "smithy.api#documentation": "
The Amazon Resource Name (ARN) for the web ACL associated with an Amplify app." } + }, + "wafStatus": { + "target": "com.amazonaws.amplify#WafStatus", + "traits": { + "smithy.api#documentation": "
The status of the process to associate or disassociate a web ACL to an Amplify app." } + }, + "statusReason": { + "target": "com.amazonaws.amplify#StatusReason", + "traits": { + "smithy.api#documentation": "
The reason for the current status of the Firewall configuration." } + } }, + "traits": { + "smithy.api#documentation": "
Describes the Firewall configuration for a hosted Amplify application.\n Firewall support enables you to protect your web applications with a direct integration\n with WAF. For more information about using WAF protections for an Amplify application, see\n Firewall support for hosted sites in the Amplify\n User Guide." } + }, + "com.amazonaws.amplify#WafStatus": { + "type": "enum", + "members": { + "ASSOCIATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSOCIATING" + } + }, + "ASSOCIATION_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSOCIATION_FAILED" + } + }, + "ASSOCIATION_SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSOCIATION_SUCCESS" + } + }, + "DISASSOCIATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISASSOCIATING" + } + }, + "DISASSOCIATION_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISASSOCIATION_FAILED" + } + } + } + }, + "com.amazonaws.amplify#WebAclArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^arn:aws:wafv2:" + } + }, "com.amazonaws.amplify#Webhook": { "type": "structure", "members": { @@ -6727,14 +6816,14 @@ "createTime": { "target": "com.amazonaws.amplify#CreateTime", "traits": { - "smithy.api#documentation": "
The create date and time for a webhook.", + "smithy.api#documentation": "
A timestamp of when Amplify created the webhook in your Git repository.", "smithy.api#required": {} } }, "updateTime": { "target": "com.amazonaws.amplify#UpdateTime", "traits": { - "smithy.api#documentation": "
Updates the date and time for a webhook.", + "smithy.api#documentation": "
A timestamp of when Amplify updated the webhook in your Git repository.", "smithy.api#required": {} } } @@ -6776,6 +6865,9 @@ "member": { "target": "com.amazonaws.amplify#Webhook" } + }, + "com.amazonaws.amplify#webhookCreateTime": { + "type": "timestamp" } } } diff --git a/models/api-gateway.json b/models/api-gateway.json index 44117eaf16..f744b3430f 100644 --- a/models/api-gateway.json +++ b/models/api-gateway.json @@ -2255,7 +2255,7 @@ "domainNameId": { "target": "com.amazonaws.apigateway#String", "traits": { - "smithy.api#documentation": "
The identifier for the domain name resource. Supported only for private custom domain names.", + "smithy.api#documentation": "
The identifier for the domain name resource. Required for private custom domain names.", "smithy.api#httpQuery": "domainNameId" } }, @@ -6765,7 +6765,7 @@ "domainNameId": { "target": "com.amazonaws.apigateway#String", "traits": { - "smithy.api#documentation": "
\n The identifier for the domain name resource. Supported only for private custom domain names.\n", + "smithy.api#documentation": "
\n The identifier for the domain name resource. Required for private custom domain names.\n", "smithy.api#httpQuery": "domainNameId" } } diff --git a/models/appstream.json b/models/appstream.json index f60bb59d4d..11b0cb92d3 100644 --- a/models/appstream.json +++ b/models/appstream.json @@ -8186,6 +8186,12 @@ "traits": { "smithy.api#enumValue": "RHEL8" } + }, + "ROCKY_LINUX8": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ROCKY_LINUX8" + } } } }, diff --git a/models/appsync.json b/models/appsync.json index 728d9ec457..e1dbedc20b 100644 --- a/models/appsync.json +++ b/models/appsync.json @@ -10207,7 +10207,8 @@ "authenticationType": { "target": "com.amazonaws.appsync#AuthenticationType", "traits": { - "smithy.api#documentation": "
The new authentication type for the GraphqlApi object." + "smithy.api#documentation": "
The new authentication type for the GraphqlApi object.", + "smithy.api#required": {} } }, "userPoolConfig": { diff --git a/models/artifact.json b/models/artifact.json index 90a54e27e5..1eaacfd2ff 100644 --- a/models/artifact.json +++ b/models/artifact.json @@ -121,18 +121,15 @@ "aws.auth#sigv4": { "name": "artifact" }, + "aws.endpoints#standardPartitionalEndpoints": { + "endpointPatternType": "service_region_dnsSuffix" + }, "aws.protocols#restJson1": {}, "smithy.api#documentation": "
This reference provides descriptions of the low-level AWS Artifact Service API.
", "smithy.api#title": "AWS Artifact", "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -152,6 +149,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -303,18 +306,19 @@ "rules": [ { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://artifact-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": "https://artifact-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" @@ -337,6 +341,15 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } ], "rules": [ @@ -361,18 +374,19 @@ "rules": [ { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://artifact-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": "https://artifact-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" @@ -387,6 +401,15 @@ }, { "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, { "fn": "booleanEquals", "argv": [ @@ -419,18 +442,19 @@ "rules": [ { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://artifact.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": "https://artifact.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" @@ -445,18 +469,19 @@ }, { "conditions": [], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://artifact.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" + "endpoint": { + "url": "https://artifact.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" @@ -476,10 +501,51 @@ }, "smithy.rules#endpointTests": { "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + 
"documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://artifact-fips.us-east-1.api.aws" } }, @@ -493,6 +559,14 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://artifact-fips.us-east-1.amazonaws.com" } }, @@ -506,6 +580,14 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://artifact.us-east-1.api.aws" } }, @@ -519,6 +601,14 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://artifact.us-east-1.amazonaws.com" } }, @@ -529,105 +619,169 @@ } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://artifact-fips.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://artifact-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://artifact-fips.cn-north-1.amazonaws.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://artifact-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://artifact.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://artifact.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack 
disabled", "expect": { "endpoint": { - "url": "https://artifact.cn-north-1.amazonaws.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://artifact.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://artifact-fips.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://artifact-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://artifact-fips.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://artifact-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://artifact.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://artifact.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://artifact.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://artifact.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } @@ -647,6 +801,14 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://artifact-fips.us-iso-east-1.c2s.ic.gov" } }, @@ -671,6 +833,14 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://artifact.us-iso-east-1.c2s.ic.gov" } }, @@ -695,6 +865,14 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://artifact-fips.us-isob-east-1.sc2s.sgov.gov" } }, @@ -719,6 +897,14 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack 
disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://artifact.us-isob-east-1.sc2s.sgov.gov" } }, @@ -729,54 +915,131 @@ } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://artifact-fips.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-east-1", + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://artifact.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://artifact-fips.us-isof-south-1.csp.hci.ic.gov" + } }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + 
"authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://artifact.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { diff --git a/models/batch.json b/models/batch.json index 7319b98ad9..af5c3091d6 100644 --- a/models/batch.json +++ b/models/batch.json @@ -4907,7 +4907,7 @@ "shareDecaySeconds": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The amount of time (in seconds) to use to calculate a fair share percentage for each fair\n share identifier in use. A value of zero (0) indicates that only current usage is measured. The\n decay allows for more recently run jobs to have more weight than jobs that ran earlier. The\n maximum supported value is 604800 (1 week).

" + "smithy.api#documentation": "

The amount of time (in seconds) to use to calculate a fair share percentage for each fair\n share identifier in use. A value of zero (0) indicates the default minimum time window (600 seconds).\n The maximum supported value is 604800 (1 week).

\n

The decay allows more recently run jobs to have more weight than jobs that ran earlier. \n Consider adjusting this number if your jobs (on average) run longer than ten minutes, \n or if there is a large difference in job count or job run times between share identifiers and the allocation\n of resources doesn't meet your needs.
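For illustration, a Soto-style sketch of a fair-share policy using this field; the initializer shape is an assumption based on the Batch model, and the identifiers are placeholders.

```swift
import SotoBatch

// Sketch: weight recent usage over a 1-hour window instead of the
// 600-second default that a value of 0 implies.
let fairsharePolicy = Batch.FairsharePolicy(
    computeReservation: 10,   // hold back capacity for inactive share identifiers
    shareDecaySeconds: 3600,  // maximum is 604800 (1 week)
    shareDistribution: [
        .init(shareIdentifier: "etl", weightFactor: 1.0),
        .init(shareIdentifier: "adhoc", weightFactor: 2.0)
    ]
)
```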

" } }, "computeReservation": { @@ -5585,7 +5585,7 @@ "target": "com.amazonaws.batch#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either Amazon EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). Amazon EC2 and Fargate compute environments can't be mixed.

", + "smithy.api#documentation": "

The priority of the job queue. Job queue priority determines the order \n that job queues are evaluated when multiple queues dispatch jobs within a \n shared compute environment. A higher value for priority indicates\n a higher priority. Queues are evaluated in cycles, in descending order by\n priority. For example, a job queue with a priority value of 10 is \n evaluated before a queue with a priority value of 1. All of the \n compute environments must be either Amazon EC2 (EC2 or SPOT)\n or Fargate (FARGATE or FARGATE_SPOT). Amazon EC2 and \n Fargate compute environments can't be mixed.

\n \n

Job queue priority doesn't guarantee that a particular job executes before \n a job in a lower priority queue. Jobs added to higher priority queues during the \n queue evaluation cycle might not be evaluated until the next cycle. A job is \n dispatched from a queue only if resources are available when the queue is evaluated. \n If there are insufficient resources available at that time, the cycle proceeds to the \n next queue. This means that jobs added to higher priority queues might have to wait \n for jobs in multiple lower priority queues to complete before they are dispatched. \n You can use job dependencies to control the order for jobs from queues with different \n priorities. For more information, see Job Dependencies\n in the Batch User Guide.
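To make the evaluation order concrete, a sketch of two queues sharing one compute environment; the queue names, ARN, and convenience-initializer shape are assumptions.

```swift
import SotoBatch

let client = AWSClient()  // setup abbreviated
let batch = Batch(client: client, region: .useast1)
let envArn = "arn:aws:batch:us-east-1:111122223333:compute-environment/shared" // placeholder

// Evaluated first on each dispatch cycle...
_ = try await batch.createJobQueue(
    computeEnvironmentOrder: [.init(computeEnvironment: envArn, order: 1)],
    jobQueueName: "interactive",
    priority: 10,
    state: .enabled
)
// ...then this one, if resources remain in the shared environment.
_ = try await batch.createJobQueue(
    computeEnvironmentOrder: [.init(computeEnvironment: envArn, order: 1)],
    jobQueueName: "batch-backfill",
    priority: 1,
    state: .enabled
)
```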

\n
", "smithy.api#required": {} } }, @@ -5944,7 +5944,7 @@ "targetInstanceTypes": { "target": "com.amazonaws.batch#StringList", "traits": { - "smithy.api#documentation": "

The instance type or family that this this override launch template should be applied to.

\n

This parameter is required when defining a launch template override.

\n

Information included in this parameter must meet the following requirements:

\n
    \n
  • \n

    Must be a valid Amazon EC2 instance type or family.

    \n
  • \n
  • \n

    \n optimal isn't allowed.

    \n
  • \n
  • \n

    \n targetInstanceTypes can target only instance types and families that are included within the \n ComputeResource.instanceTypes\n set. targetInstanceTypes doesn't need to include all of the instances from the instanceType set, but at least a subset. For example, if ComputeResource.instanceTypes includes [m5, g5], targetInstanceTypes can include [m5.2xlarge] and [m5.large] but not [c5.large].

    \n
  • \n
  • \n

    \n targetInstanceTypes included within the same launch template override or across launch template overrides can't overlap for the same compute environment. For example, you can't define one launch template override to target an instance family and another define an instance type within this same family.

    \n
  • \n
" + "smithy.api#documentation": "

The instance type or family that this override launch template should be applied to.

\n

This parameter is required when defining a launch template override.

\n

Information included in this parameter must meet the following requirements:

\n
    \n
  • \n

    Must be a valid Amazon EC2 instance type or family.

    \n
  • \n
  • \n

    \n optimal isn't allowed.

    \n
  • \n
  • \n

    \n targetInstanceTypes can target only instance types and families that are included within the \n ComputeResource.instanceTypes\n set. targetInstanceTypes doesn't need to include all of the instances from the instanceType set, but at least a subset. For example, if ComputeResource.instanceTypes includes [m5, g5], targetInstanceTypes can include [m5.2xlarge] and [m5.large] but not [c5.large].

    \n
  • \n
  • \n

    \n targetInstanceTypes included within the same launch template override or across launch template overrides can't overlap for the same compute environment. For example, you can't define one launch template override to target an instance family and another define an instance type within this same family.

    \n
  • \n
" } } }, @@ -7743,7 +7743,7 @@ "environment": { "target": "com.amazonaws.batch#EnvironmentVariables", "traits": { - "smithy.api#documentation": "

The environment variables to pass to a container. This parameter maps to Env inthe Create a container\n section of the Docker Remote API\n and the --env parameter to docker run.

\n \n

We don't recommend using plaintext environment variables for sensitive information, such as\n credential data.

\n
\n \n

Environment variables cannot start with AWS_BATCH. This naming convention is\n reserved for variables that Batch sets.

\n
" + "smithy.api#documentation": "

The environment variables to pass to a container. This parameter maps to Env in the Create a container\n section of the Docker Remote API\n and the --env parameter to docker run.

\n \n

We don't recommend using plaintext environment variables for sensitive information, such as\n credential data.

\n
\n \n

Environment variables cannot start with AWS_BATCH. This naming convention is\n reserved for variables that Batch sets.

\n
" } }, "essential": { diff --git a/models/bcm-pricing-calculator.json b/models/bcm-pricing-calculator.json index e1576c9781..5e283231c2 100644 --- a/models/bcm-pricing-calculator.json +++ b/models/bcm-pricing-calculator.json @@ -3737,6 +3737,9 @@ "target": "com.amazonaws.bcmpricingcalculator#DeleteBillEstimateResponse" }, "errors": [ + { + "target": "com.amazonaws.bcmpricingcalculator#ConflictException" + }, { "target": "com.amazonaws.bcmpricingcalculator#DataUnavailableException" } diff --git a/models/bedrock-agent-runtime.json b/models/bedrock-agent-runtime.json index 9cea6ed60e..631335bc31 100644 --- a/models/bedrock-agent-runtime.json +++ b/models/bedrock-agent-runtime.json @@ -225,7 +225,7 @@ "parentActionGroupSignature": { "target": "com.amazonaws.bedrockagentruntime#ActionGroupSignature", "traits": { - "smithy.api#documentation": "

\n To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. \n You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.\n

\n

To allow your agent to generate, run, and troubleshoot code when trying to complete a task, set this field to AMAZON.CodeInterpreter. You must \n leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

\n

During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request,\n it will invoke this action group instead and return an Observation reprompting the user for more information.

" + "smithy.api#documentation": "

\n To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. \n You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.\n

\n

To allow your agent to generate, run, and troubleshoot code when trying to complete a task, set this field to AMAZON.CodeInterpreter. You must \n leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

\n

During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request,\n it will invoke this action group instead and return an Observation reprompting the user for more information.

" } }, "actionGroupExecutor": { @@ -1279,10 +1279,10 @@ "smithy.api#documentation": "

Controls the API operations or functions to invoke based on the user confirmation.

" } }, - "responseBody": { - "target": "com.amazonaws.bedrockagentruntime#ResponseBody", + "responseState": { + "target": "com.amazonaws.bedrockagentruntime#ResponseState", "traits": { - "smithy.api#documentation": "

The response body from the API operation. The key of the object is the content type (currently, only TEXT is supported). The response may be returned directly or from the Lambda function.

" + "smithy.api#documentation": "

Controls the final response state returned to the end user when API/function execution fails. When this state is FAILURE, the request fails with a dependency failure exception. When this state is REPROMPT, the API/function response is sent back to the model for re-prompting.
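To make the two states concrete, a hedged sketch of reporting a failed downstream call through return control; the type and member names follow the shapes in this model, and the action-group details are placeholders.

```swift
import SotoBedrockAgentRuntime

// Sketch: the downstream API failed; ask the model to re-prompt the
// user instead of failing the whole InvokeAgent request.
let apiResult = BedrockAgentRuntime.ApiResult(
    actionGroup: "orders",      // placeholder action group
    apiPath: "/order/status",
    httpMethod: "GET",
    httpStatusCode: 502,
    responseState: .reprompt    // .failure raises a dependency failure instead
)
```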

" } }, "httpStatusCode": { @@ -1291,10 +1291,10 @@ "smithy.api#documentation": "

http status code from API execution response (for example: 200, 400, 500).

" } }, - "responseState": { - "target": "com.amazonaws.bedrockagentruntime#ResponseState", + "responseBody": { + "target": "com.amazonaws.bedrockagentruntime#ResponseBody", "traits": { - "smithy.api#documentation": "

Controls the final response state returned to end user when API/Function execution failed. When this state is FAILURE, the request would fail with dependency failure exception. When this state is REPROMPT, the API/function response will be sent to model for re-prompt

" + "smithy.api#documentation": "

The response body from the API operation. The key of the object is the content type (currently, only TEXT is supported). The response may be returned directly or from the Lambda function.

" } }, "agentId": { @@ -1390,6 +1390,20 @@ "smithy.api#pattern": "^(arn:aws(-[^:]+)?:(bedrock|sagemaker):[a-z0-9-]{1,20}:([0-9]{12})?:([a-z-]+/)?)?([a-z0-9.-]{1,63}){0,2}(([:][a-z0-9-]{1,63}){0,2})?(/[a-z0-9]{1,12})?$" } }, + "com.amazonaws.bedrockagentruntime#BedrockModelConfigurations": { + "type": "structure", + "members": { + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The performance configuration for the model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for a model called with InvokeAgent.
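A sketch of threading this setting through an InvokeAgent call; identifiers are placeholders and the initializer shape is inferred from the model.

```swift
import SotoBedrockAgentRuntime

// Sketch: request the latency-optimized model variant for one call.
let request = BedrockAgentRuntime.InvokeAgentRequest(
    agentAliasId: "TSTALIASID",  // placeholders
    agentId: "AGENT1234",
    bedrockModelConfigurations: .init(
        performanceConfig: .init(latency: .optimized)
    ),
    inputText: "Summarize the open tickets.",
    sessionId: "session-1"
)
```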

" + } + }, "com.amazonaws.bedrockagentruntime#BedrockRerankingConfiguration": { "type": "structure", "members": { @@ -1873,6 +1887,13 @@ "smithy.api#documentation": "

The unique identifier of the memory.

", "smithy.api#httpQuery": "memoryId" } + }, + "sessionId": { + "target": "com.amazonaws.bedrockagentruntime#SessionId", + "traits": { + "smithy.api#documentation": "

The unique session identifier of the memory.

", + "smithy.api#httpQuery": "sessionId" + } } }, "traits": { @@ -2004,6 +2025,12 @@ "traits": { "smithy.api#documentation": "

Additional model parameters and their corresponding values not included in the textInferenceConfig structure for an external source. Takes in custom model parameters specific to the language model being used.

" } + }, + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The latency configuration for the model.

" + } } }, "traits": { @@ -2274,9 +2301,25 @@ "traits": { "smithy.api#enumValue": "SUCCESS" } + }, + "INPUT_REQUIRED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INPUT_REQUIRED" + } } } }, + "com.amazonaws.bedrockagentruntime#FlowExecutionId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 2, + "max": 100 + }, + "smithy.api#pattern": "^[0-9a-zA-Z._:-]+$" + } + }, "com.amazonaws.bedrockagentruntime#FlowIdentifier": { "type": "string", "traits": { @@ -2299,8 +2342,7 @@ "nodeOutputName": { "target": "com.amazonaws.bedrockagentruntime#NodeOutputName", "traits": { - "smithy.api#documentation": "

The name of the output from the flow input node that begins the prompt flow.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The name of the output from the flow input node that begins the prompt flow.

" } }, "content": { @@ -2309,6 +2351,12 @@ "smithy.api#documentation": "

Contains information about an input into the prompt flow.

", "smithy.api#required": {} } + }, + "nodeInputName": { + "target": "com.amazonaws.bedrockagentruntime#NodeInputName", + "traits": { + "smithy.api#documentation": "

The name of the input from the flow input node.

" + } } }, "traits": { @@ -2342,6 +2390,50 @@ } } }, + "com.amazonaws.bedrockagentruntime#FlowMultiTurnInputContent": { + "type": "union", + "members": { + "document": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

The requested additional input to send back to the multi-turn flow node.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The content structure containing input information for multi-turn flow interactions.

" + } + }, + "com.amazonaws.bedrockagentruntime#FlowMultiTurnInputRequestEvent": { + "type": "structure", + "members": { + "nodeName": { + "target": "com.amazonaws.bedrockagentruntime#NodeName", + "traits": { + "smithy.api#documentation": "

The name of the node in the flow that is requesting the input.

", + "smithy.api#required": {} + } + }, + "nodeType": { + "target": "com.amazonaws.bedrockagentruntime#NodeType", + "traits": { + "smithy.api#documentation": "

The type of the node in the flow that is requesting the input.

", + "smithy.api#required": {} + } + }, + "content": { + "target": "com.amazonaws.bedrockagentruntime#FlowMultiTurnInputContent", + "traits": { + "smithy.api#documentation": "

The content payload containing the input request details for the multi-turn interaction.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Response object from the flow multi-turn node requesting additional information.

", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrockagentruntime#FlowOutputContent": { "type": "union", "members": { @@ -2468,6 +2560,12 @@ "traits": { "smithy.api#documentation": "

There was an issue with a dependency due to a server issue. Retry your request.

" } + }, + "flowMultiTurnInputRequestEvent": { + "target": "com.amazonaws.bedrockagentruntime#FlowMultiTurnInputRequestEvent", + "traits": { + "smithy.api#documentation": "

The event stream containing the multi-turn input request information from the flow.
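A hedged sketch of the resulting loop: when this event arrives, the caller resumes the flow with the same execution identifier and echoes the requested input back to the asking node. The document payload encoding and the input name are assumptions.

```swift
import SotoBedrockAgentRuntime

// Sketch: answer a multi-turn input request by resuming the execution.
func answer(_ ask: BedrockAgentRuntime.FlowMultiTurnInputRequestEvent,
            executionId: String) -> BedrockAgentRuntime.InvokeFlowRequest {
    BedrockAgentRuntime.InvokeFlowRequest(
        executionId: executionId,          // resume rather than start a new run
        flowAliasIdentifier: "FLOWALIAS",  // placeholders
        flowIdentifier: "FLOW1234",
        inputs: [.init(
            content: .document("user's answer"),  // assumed document payload encoding
            nodeInputName: "userInput",           // assumed input name on the node
            nodeName: ask.nodeName
        )]
    )
}
```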

" + } } }, "traits": { @@ -3098,6 +3196,12 @@ "traits": { "smithy.api#documentation": "

Additional model parameters and corresponding values not included in the textInferenceConfig structure for a knowledge base. This allows users to provide custom model parameters specific to the language model being used.

" } + }, + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The latency configuration for the model.

" + } } }, "traits": { @@ -4308,6 +4412,20 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.bedrockagentruntime#InlineBedrockModelConfigurations": { + "type": "structure", + "members": { + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The latency configuration for the model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for a model called with InvokeInlineAgent.

" + } + }, "com.amazonaws.bedrockagentruntime#InlineSessionState": { "type": "structure", "members": { @@ -4593,6 +4711,9 @@ { "target": "com.amazonaws.bedrockagentruntime#InternalServerException" }, + { + "target": "com.amazonaws.bedrockagentruntime#ModelNotReadyException" + }, { "target": "com.amazonaws.bedrockagentruntime#ResourceNotFoundException" }, @@ -4607,7 +4728,7 @@ } ], "traits": { - "smithy.api#documentation": "\n

The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent.

\n
\n

Sends a prompt for the agent to process and respond to. Note the following fields for the request:

\n
    \n
  • \n

    To continue the same conversation with an agent, use the same sessionId value in the request.

    \n
  • \n
  • \n

    To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.

    \n
  • \n
  • \n

    End a conversation by setting endSession to true.

    \n
  • \n
  • \n

    In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.

    \n
  • \n
\n

The response is returned in the bytes field of the chunk object.

\n
    \n
  • \n

    The attribution object contains citations for parts of the response.

    \n
  • \n
  • \n

    If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.

    \n
  • \n
  • \n

    If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field.

    \n
  • \n
  • \n

    Errors are also surfaced in the response.

    \n
  • \n
", + "smithy.api#documentation": "\n \n

Sends a prompt for the agent to process and respond to. Note the following fields for the request:

\n
    \n
  • \n

    To continue the same conversation with an agent, use the same sessionId value in the request.

    \n
  • \n
  • \n

    To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.

    \n
  • \n
  • \n

    To stream agent responses, make sure that only the orchestration prompt is enabled. Agent streaming is not supported for the following steps:\n

    \n
      \n
    • \n

      \n Pre-processing\n

      \n
    • \n
    • \n

      \n Post-processing\n

      \n
    • \n
    • \n

      Agents with one knowledge base and user input not enabled

      \n
    • \n
    \n
  • \n
  • \n

    End a conversation by setting endSession to true.

    \n
  • \n
  • \n

    In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.

    \n
  • \n
\n

The response contains both chunk and trace attributes.

\n

The final response is returned in the bytes field of the chunk object. InvokeAgent returns one chunk for the entire interaction.
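For the response side, a hedged sketch of draining the completion stream; the event-stream surface and the AWSBase64Data decoding are assumptions about the generated Soto code, and `response` comes from an InvokeAgent call like the one sketched earlier.

```swift
// Sketch: collect chunk events into the final answer.
var answer: [UInt8] = []
for try await event in response.completion {
    if case .chunk(let part) = event, let bytes = part.bytes {
        answer.append(contentsOf: bytes.decoded())  // one chunk unless streaming is on
    }
}
print(String(decoding: answer, as: UTF8.self))
```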

\n
    \n
  • \n

    The attribution object contains citations for parts of the response.

    \n
  • \n
  • \n

    If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.

    \n
  • \n
  • \n

    If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field.

    \n
  • \n
  • \n

    Errors are also surfaced in the response.

    \n
  • \n
", "smithy.api#http": { "code": 200, "method": "POST", @@ -4672,10 +4793,16 @@ "smithy.api#documentation": "

The unique identifier of the agent memory.

" } }, + "bedrockModelConfigurations": { + "target": "com.amazonaws.bedrockagentruntime#BedrockModelConfigurations", + "traits": { + "smithy.api#documentation": "

Model performance settings for the request.

" + } + }, "streamingConfigurations": { "target": "com.amazonaws.bedrockagentruntime#StreamingConfigurations", "traits": { - "smithy.api#documentation": "

\n Specifies the configurations for streaming.\n

" + "smithy.api#documentation": "

\n Specifies the configurations for streaming.\n

\n \n

To use agent streaming, you need permissions to perform the bedrock:InvokeModelWithResponseStream action.
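A minimal sketch of opting in, assuming the member name Soto generates from StreamingConfigurations; note the extra IAM action called out above.

```swift
// Sketch: stream the final response instead of one buffered chunk.
// The calling role must allow bedrock:InvokeModelWithResponseStream.
let streaming = BedrockAgentRuntime.StreamingConfigurations(
    streamFinalResponse: true
)
```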

\n
" } }, "sourceArn": { @@ -4806,6 +4933,18 @@ "traits": { "smithy.api#documentation": "

Specifies whether to return the trace for the flow or not. Traces track inputs and outputs for nodes in the flow. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

" } + }, + "modelPerformanceConfiguration": { + "target": "com.amazonaws.bedrockagentruntime#ModelPerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

Model performance settings for the request.

" + } + }, + "executionId": { + "target": "com.amazonaws.bedrockagentruntime#FlowExecutionId", + "traits": { + "smithy.api#documentation": "

The unique identifier for the current flow execution. If you don't provide a value, Amazon Bedrock creates the identifier for you.

" + } } }, "traits": { @@ -4822,6 +4961,13 @@ "smithy.api#httpPayload": {}, "smithy.api#required": {} } + }, + "executionId": { + "target": "com.amazonaws.bedrockagentruntime#FlowExecutionId", + "traits": { + "smithy.api#documentation": "

The unique identifier for the current flow execution.

", + "smithy.api#httpHeader": "x-amz-bedrock-flow-execution-id" + } } }, "traits": { @@ -4866,7 +5012,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n Invokes an inline Amazon Bedrock agent using the configurations you provide with the request.\n

\n
    \n
  • \n

    Specify the following fields for security purposes.

    \n
      \n
    • \n

      (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent.

      \n
    • \n
    • \n

      (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeInlineAgent request begins a new session.

      \n
    • \n
    \n
  • \n
  • \n

    To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. \n For more information, see Advanced prompts.

    \n
  • \n
  • \n

    The agent instructions will not be honored if your agent has only one knowledge base, uses default prompts, has no action group, and user input is disabled.

    \n
  • \n
\n \n

The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeInlineAgent.

\n
", + "smithy.api#documentation": "

\n Invokes an inline Amazon Bedrock agent using the configurations you provide with the request.\n

\n
    \n
  • \n

    Specify the following fields for security purposes.

    \n
      \n
    • \n

      (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent.

      \n
    • \n
    • \n

      (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeInlineAgent request begins a new session.

      \n
    • \n
    \n
  • \n
  • \n

    To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. \n For more information, see Advanced prompts.

    \n
  • \n
  • \n

    The agent instructions will not be honored if your agent has only one knowledge base, uses default prompts, has no action group, and user input is disabled.

    \n
  • \n
\n \n ", "smithy.api#http": { "code": 200, "method": "POST", @@ -4958,6 +5104,18 @@ "traits": { "smithy.api#documentation": "

\n Configurations for advanced prompts used to override the default prompts to enhance the accuracy of the inline agent.\n

" } + }, + "bedrockModelConfigurations": { + "target": "com.amazonaws.bedrockagentruntime#InlineBedrockModelConfigurations", + "traits": { + "smithy.api#documentation": "

Model settings for the request.

" + } + }, + "streamingConfigurations": { + "target": "com.amazonaws.bedrockagentruntime#StreamingConfigurations", + "traits": { + "smithy.api#documentation": "

\n Specifies the configurations for streaming.\n

\n \n

To use agent streaming, you need permissions to perform the bedrock:InvokeModelWithResponseStream action.

\n
" + } } }, "traits": { @@ -5607,6 +5765,33 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.bedrockagentruntime#ModelNotReadyException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.bedrockagentruntime#NonBlankString" + } + }, + "traits": { + "smithy.api#documentation": "

\n The model specified in the request is not ready to serve inference requests. The AWS SDK\n will automatically retry the operation up to 5 times. For information about configuring\n automatic retries, see Retry behavior in the AWS SDKs and Tools\n reference guide.\n
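If the built-in retries are exhausted, the error still surfaces to the caller. A hedged sketch of one extra application-level retry on top; the Soto error-matching style is an assumption, and `agentRuntime`/`request` are from the earlier sketches.

```swift
import SotoBedrockAgentRuntime

// Sketch: one extra retry after the SDK's automatic attempts give up.
do {
    _ = try await agentRuntime.invokeAgent(request)
} catch let error as BedrockAgentRuntimeErrorType where error == .modelNotReadyException {
    try await Task.sleep(for: .seconds(10))  // crude backoff
    _ = try await agentRuntime.invokeAgent(request)
}
```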

", + "smithy.api#error": "client", + "smithy.api#httpError": 424 + } + }, + "com.amazonaws.bedrockagentruntime#ModelPerformanceConfiguration": { + "type": "structure", + "members": { + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The latency configuration for the model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The performance configuration for a model called with InvokeFlow.

" + } + }, "com.amazonaws.bedrockagentruntime#Name": { "type": "string", "traits": { @@ -5952,6 +6137,12 @@ "traits": { "smithy.api#documentation": "

To split up the prompt and retrieve multiple sources, set the transformation type to\n QUERY_DECOMPOSITION.

" } + }, + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The latency configuration for the model.

" + } } }, "traits": { @@ -6246,6 +6437,38 @@ } } }, + "com.amazonaws.bedrockagentruntime#PerformanceConfigLatency": { + "type": "enum", + "members": { + "STANDARD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "standard" + } + }, + "OPTIMIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "optimized" + } + } + } + }, + "com.amazonaws.bedrockagentruntime#PerformanceConfiguration": { + "type": "structure", + "members": { + "latency": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfigLatency", + "traits": { + "smithy.api#default": "standard", + "smithy.api#documentation": "

To use a latency-optimized version of the model, set to optimized.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Performance settings for a model.

" + } + }, "com.amazonaws.bedrockagentruntime#PostProcessingModelInvocationOutput": { "type": "structure", "members": { @@ -7251,6 +7474,12 @@ "smithy.api#documentation": "

There was an issue with a dependency due to a server issue. Retry your request.

" } }, + "modelNotReadyException": { + "target": "com.amazonaws.bedrockagentruntime#ModelNotReadyException", + "traits": { + "smithy.api#documentation": "

\n The model specified in the request is not ready to serve inference requests. The AWS SDK\n will automatically retry the operation up to 5 times. For information about configuring\n automatic retries, see Retry behavior in the AWS SDKs and Tools\n reference guide.\n

" + } + }, "files": { "target": "com.amazonaws.bedrockagentruntime#FilePart", "traits": { @@ -8721,7 +8950,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Configurations for streaming.\n

" + "smithy.api#documentation": "

\n Configurations for streaming.

" } }, "com.amazonaws.bedrockagentruntime#SummaryText": { diff --git a/models/bedrock-agent.json b/models/bedrock-agent.json index e6601004b9..3a4a736889 100644 --- a/models/bedrock-agent.json +++ b/models/bedrock-agent.json @@ -11871,6 +11871,14 @@ "smithy.api#documentation": "

Details about a malformed input expression in a node.

" } }, + "com.amazonaws.bedrockagent#MaxRecentSessions": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, "com.amazonaws.bedrockagent#MaxResults": { "type": "integer", "traits": { @@ -11905,6 +11913,12 @@ "smithy.api#default": 30, "smithy.api#documentation": "

The number of days the agent is configured to retain the conversational context.

" } + }, + "sessionSummaryConfiguration": { + "target": "com.amazonaws.bedrockagent#SessionSummaryConfiguration", + "traits": { + "smithy.api#documentation": "

Contains the configuration for SESSION_SUMMARY memory type enabled for the agent.

" + } } }, "traits": { @@ -13571,6 +13585,12 @@ "traits": { "smithy.api#enumValue": "KNOWLEDGE_BASE_RESPONSE_GENERATION" } + }, + "MEMORY_SUMMARIZATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MEMORY_SUMMARIZATION" + } } } }, @@ -14744,6 +14764,21 @@ "smithy.api#httpError": 402 } }, + "com.amazonaws.bedrockagent#SessionSummaryConfiguration": { + "type": "structure", + "members": { + "maxRecentSessions": { + "target": "com.amazonaws.bedrockagent#MaxRecentSessions", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

Maximum number of recent session summaries to include in the agent's prompt context.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration for SESSION_SUMMARY memory type enabled for the agent.
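Tying the two new shapes together, a sketch of a memory configuration; member names follow this model and the enum case is an assumption about the generated code.

```swift
import SotoBedrockAgent

// Sketch: keep summaries of the five most recent sessions, retained
// for 30 days (storageDays now allows up to 365).
let memory = BedrockAgent.MemoryConfiguration(
    enabledMemoryTypes: [.sessionSummary],
    sessionSummaryConfiguration: .init(maxRecentSessions: 5),
    storageDays: 30
)
```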

" + } + }, "com.amazonaws.bedrockagent#SessionTTL": { "type": "integer", "traits": { @@ -14761,6 +14796,12 @@ "traits": { "smithy.api#enumValue": "OAUTH2_CLIENT_CREDENTIALS" } + }, + "OAUTH2_SHAREPOINT_APP_ONLY_CLIENT_CREDENTIALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OAUTH2_SHAREPOINT_APP_ONLY_CLIENT_CREDENTIALS" + } } } }, @@ -15200,7 +15241,7 @@ "smithy.api#default": 30, "smithy.api#range": { "min": 0, - "max": 30 + "max": 365 } } }, @@ -17332,6 +17373,16 @@ "smithy.api#documentation": "

The configuration of web URLs that you want to crawl. \n You should be authorized to crawl the URLs.

" } }, + "com.amazonaws.bedrockagent#UserAgent": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 15, + "max": 40 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrockagent#ValidateFlowDefinition": { "type": "operation", "input": { @@ -17547,6 +17598,12 @@ "traits": { "smithy.api#documentation": "

The scope of what is crawled for your URLs.

\n

You can choose to crawl only web pages that belong to the same host or primary \n domain. For example, only web pages that contain the seed URL \n \"https://docs.aws.amazon.com/bedrock/latest/userguide/\" and no other domains. \n You can choose to include sub domains in addition to the host or primary domain. \n For example, web pages that contain \"aws.amazon.com\" can also include sub domain \n \"docs.aws.amazon.com\".

" } + }, + "userAgent": { + "target": "com.amazonaws.bedrockagent#UserAgent", + "traits": { + "smithy.api#documentation": "

A string used to identify the crawler or bot when it accesses a web server. By default, \n this is set to bedrockbot_UUID for your crawler. You can optionally append a custom \n string to bedrockbot_UUID to allowlist a specific user agent permitted to access your source URLs. \n

" + } } }, "traits": { @@ -17565,6 +17622,15 @@ "max": 300 } } + }, + "maxPages": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

\n The maximum number of web pages crawled from your source URLs, up to 25,000 pages. If \n the web pages exceed this limit, the data source sync fails and no web pages are ingested.\n
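A sketch combining the two new crawler knobs; type and member names assume the generated Soto shapes, and the user-agent suffix is a placeholder.

```swift
import SotoBedrockAgent

// Sketch: cap the crawl at 5,000 pages and allowlist a custom agent.
let crawler = BedrockAgent.WebCrawlerConfiguration(
    crawlerLimits: .init(
        maxPages: 5000,  // the sync fails outright if the crawl would exceed this
        rateLimit: 100
    ),
    scope: .hostOnly,
    userAgent: "bedrockbot_UUID-example-token"  // placeholder suffix
)
```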

", + "smithy.api#range": { + "min": 1 + } + } } }, "traits": { diff --git a/models/bedrock-data-automation-runtime.json b/models/bedrock-data-automation-runtime.json index 9b1ef3fa06..e6257f5d67 100644 --- a/models/bedrock-data-automation-runtime.json +++ b/models/bedrock-data-automation-runtime.json @@ -33,7 +33,7 @@ "name": "bedrock" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Bedrock Keystone Runtime", + "smithy.api#documentation": "Amazon Bedrock Data Automation Runtime", "smithy.api#title": "Runtime for Amazon Bedrock Data Automation", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/bedrock-data-automation.json b/models/bedrock-data-automation.json index 2c2c8ab2df..625c2b8853 100644 --- a/models/bedrock-data-automation.json +++ b/models/bedrock-data-automation.json @@ -41,7 +41,7 @@ "name": "bedrock" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Amazon Bedrock Keystone Build", + "smithy.api#documentation": "Amazon Bedrock Data Automation BuildTime", "smithy.api#title": "Data Automation for Amazon Bedrock", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1148,7 +1148,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Amazon Bedrock Keystone Blueprint", + "smithy.api#documentation": "Creates an Amazon Bedrock Data Automation Blueprint", "smithy.api#http": { "code": 201, "method": "PUT", @@ -1241,7 +1241,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new version of an existing Amazon Bedrock Keystone Blueprint", + "smithy.api#documentation": "Creates a new version of an existing Amazon Bedrock Data Automation Blueprint", "smithy.api#http": { "code": 201, "method": "POST", @@ -1318,7 +1318,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Amazon Bedrock Keystone DataAutomationProject", + "smithy.api#documentation": "Creates an Amazon Bedrock Data Automation Project", "smithy.api#http": { "code": 201, "method": "PUT", @@ -1669,7 +1669,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an existing Amazon Bedrock Keystone Blueprint", + "smithy.api#documentation": "Deletes an existing Amazon Bedrock Data Automation Blueprint", "smithy.api#http": { "code": 204, "method": "DELETE", @@ -1736,7 +1736,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an existing Amazon Bedrock Keystone DataAutomationProject", + "smithy.api#documentation": "Deletes an existing Amazon Bedrock Data Automation Project", "smithy.api#http": { "code": 204, "method": "DELETE", @@ -2037,7 +2037,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets an existing Amazon Bedrock Keystone Blueprint", + "smithy.api#documentation": "Gets an existing Amazon Bedrock Data Automation Blueprint", "smithy.api#http": { "code": 200, "method": "POST", @@ -2117,7 +2117,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets an existing Amazon Bedrock Keystone DataAutomationProject", + "smithy.api#documentation": "Gets an existing Amazon Bedrock Data Automation Project", "smithy.api#http": { "code": 200, "method": "POST", @@ -2363,7 +2363,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists all existing Amazon Bedrock Keystone Blueprints", + "smithy.api#documentation": "Lists all existing Amazon Bedrock Data Automation Blueprints", "smithy.api#http": { "code": 200, "method": "POST", @@ -2449,7 +2449,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists all existing Amazon Bedrock Keystone DataAutomationProjects", + "smithy.api#documentation": "Lists all existing Amazon Bedrock Data 
Automation Projects", "smithy.api#http": { "code": 200, "method": "POST", @@ -2704,7 +2704,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an existing Amazon Bedrock Blueprint", + "smithy.api#documentation": "Updates an existing Amazon Bedrock Data Automation Blueprint", "smithy.api#http": { "code": 200, "method": "PUT", @@ -2783,7 +2783,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an existing Amazon Bedrock DataAutomationProject", + "smithy.api#documentation": "Updates an existing Amazon Bedrock Data Automation Project", "smithy.api#http": { "code": 200, "method": "PUT", diff --git a/models/bedrock-runtime.json b/models/bedrock-runtime.json index 51a4ba49c9..d560f3407c 100644 --- a/models/bedrock-runtime.json +++ b/models/bedrock-runtime.json @@ -4892,7 +4892,7 @@ "min": 1, "max": 64 }, - "smithy.api#pattern": "^[a-zA-Z][a-zA-Z0-9_]*$" + "smithy.api#pattern": "^[a-zA-Z0-9_-]+$" } }, "com.amazonaws.bedrockruntime#ToolResultBlock": { diff --git a/models/bedrock.json b/models/bedrock.json index 278940da89..73463e335b 100644 --- a/models/bedrock.json +++ b/models/bedrock.json @@ -3027,6 +3027,12 @@ "smithy.api#default": "{}", "smithy.api#documentation": "

Each Amazon Bedrock support different inference parameters that change how the model behaves during inference.

" } + }, + "performanceConfig": { + "target": "com.amazonaws.bedrock#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

Specifies performance settings for the model or inference profile.

" + } } }, "traits": { @@ -10536,6 +10542,37 @@ "smithy.api#pattern": "^\\S*$" } }, + "com.amazonaws.bedrock#PerformanceConfigLatency": { + "type": "enum", + "members": { + "STANDARD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "standard" + } + }, + "OPTIMIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "optimized" + } + } + } + }, + "com.amazonaws.bedrock#PerformanceConfiguration": { + "type": "structure", + "members": { + "latency": { + "target": "com.amazonaws.bedrock#PerformanceConfigLatency", + "traits": { + "smithy.api#documentation": "

Specifies whether to use the latency-optimized or standard version of a model or inference profile.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains performance settings for a model.

" + } + }, "com.amazonaws.bedrock#PositiveInteger": { "type": "integer", "traits": { diff --git a/models/billing.json b/models/billing.json index 5b6c49d7f8..4e4f50ad2c 100644 --- a/models/billing.json +++ b/models/billing.json @@ -5,8 +5,35 @@ "type": "service", "version": "2023-09-07", "operations": [ + { + "target": "com.amazonaws.billing#CreateBillingView" + }, + { + "target": "com.amazonaws.billing#DeleteBillingView" + }, + { + "target": "com.amazonaws.billing#GetBillingView" + }, + { + "target": "com.amazonaws.billing#GetResourcePolicy" + }, { "target": "com.amazonaws.billing#ListBillingViews" + }, + { + "target": "com.amazonaws.billing#ListSourceViewsForBillingView" + }, + { + "target": "com.amazonaws.billing#ListTagsForResource" + }, + { + "target": "com.amazonaws.billing#TagResource" + }, + { + "target": "com.amazonaws.billing#UntagResource" + }, + { + "target": "com.amazonaws.billing#UpdateBillingView" } ], "traits": { @@ -543,7 +570,86 @@ "com.amazonaws.billing#BillingViewArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9_\\+=\\.\\-@]{1,43}$" + "smithy.api#pattern": "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$" + } + }, + "com.amazonaws.billing#BillingViewArnList": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#BillingViewArn" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.billing#BillingViewDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#pattern": "^([ a-zA-Z0-9_\\+=\\.\\-@]+)?$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.billing#BillingViewElement": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

" + } + }, + "name": { + "target": "com.amazonaws.billing#BillingViewName", + "traits": { + "smithy.api#documentation": "

\n The name of the billing view.\n

" + } + }, + "description": { + "target": "com.amazonaws.billing#BillingViewDescription", + "traits": { + "smithy.api#documentation": "

\n The description of the billing view.\n

" + } + }, + "billingViewType": { + "target": "com.amazonaws.billing#BillingViewType", + "traits": { + "smithy.api#documentation": "

The type of billing view.\n

" + } + }, + "ownerAccountId": { + "target": "com.amazonaws.billing#AccountId", + "traits": { + "smithy.api#documentation": "

\n The account ID of the owner of the billing view.\n

" + } + }, + "dataFilterExpression": { + "target": "com.amazonaws.billing#Expression", + "traits": { + "smithy.api#documentation": "

\n See Expression. Billing view only supports LINKED_ACCOUNT and Tags.\n

" + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time when the billing view was created.\n

" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time when the billing view was last updated.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The metadata associated with the billing view.\n

" } }, "com.amazonaws.billing#BillingViewList": { @@ -567,6 +673,12 @@ "smithy.api#documentation": "

\n The name of the billing view.\n

" } }, + "description": { + "target": "com.amazonaws.billing#BillingViewDescription", + "traits": { + "smithy.api#documentation": "

\n The description of the billing view.\n

" + } + }, "ownerAccountId": { "target": "com.amazonaws.billing#AccountId", "traits": { @@ -587,10 +699,26 @@ "com.amazonaws.billing#BillingViewName": { "type": "string", "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, "smithy.api#pattern": "^[ a-zA-Z0-9_\\+=\\.\\-@]+$", "smithy.api#sensitive": {} } }, + "com.amazonaws.billing#BillingViewSourceViewsList": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#BillingViewArn" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, "com.amazonaws.billing#BillingViewType": { "type": "enum", "members": { @@ -605,9 +733,21 @@ "traits": { "smithy.api#enumValue": "BILLING_GROUP" } + }, + "CUSTOM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOM" + } } } }, + "com.amazonaws.billing#BillingViewTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#BillingViewType" + } + }, "com.amazonaws.billing#BillingViewsMaxResults": { "type": "integer", "traits": { @@ -617,23 +757,13 @@ } } }, - "com.amazonaws.billing#ErrorMessage": { - "type": "string", - "traits": { - "smithy.api#length": { - "max": 1024 - } - } - }, - "com.amazonaws.billing#FieldName": { + "com.amazonaws.billing#ClientToken": { "type": "string", "traits": { - "smithy.api#length": { - "max": 100 - } + "smithy.api#pattern": "^[a-zA-Z0-9-]+$" } }, - "com.amazonaws.billing#InternalServerException": { + "com.amazonaws.billing#ConflictException": { "type": "structure", "members": { "message": { @@ -641,33 +771,53 @@ "traits": { "smithy.api#required": {} } + }, + "resourceId": { + "target": "com.amazonaws.billing#ResourceId", + "traits": { + "smithy.api#documentation": "

\n The identifier for the service resource associated with the request.\n

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "com.amazonaws.billing#ResourceType", + "traits": { + "smithy.api#documentation": "

\n The type of resource associated with the request.\n

", + "smithy.api#required": {} + } } }, "traits": { "aws.protocols#awsQueryError": { - "code": "BillingInternalServer", - "httpResponseCode": 500 + "code": "BillingConflict", + "httpResponseCode": 409 }, - "smithy.api#documentation": "

The request processing failed because of an unknown error, exception, or failure.\n

", - "smithy.api#error": "server", - "smithy.api#httpError": 500 + "smithy.api#documentation": "

\n The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the conflict before retrying this request.\n

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 } }, - "com.amazonaws.billing#ListBillingViews": { + "com.amazonaws.billing#CreateBillingView": { "type": "operation", "input": { - "target": "com.amazonaws.billing#ListBillingViewsRequest" + "target": "com.amazonaws.billing#CreateBillingViewRequest" }, "output": { - "target": "com.amazonaws.billing#ListBillingViewsResponse" + "target": "com.amazonaws.billing#CreateBillingViewResponse" }, "errors": [ { "target": "com.amazonaws.billing#AccessDeniedException" }, + { + "target": "com.amazonaws.billing#ConflictException" + }, { "target": "com.amazonaws.billing#InternalServerException" }, + { + "target": "com.amazonaws.billing#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.billing#ThrottlingException" }, @@ -676,97 +826,75 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the billing views available for a given time period.\n

\n

Every Amazon Web Services account has a unique PRIMARY billing view that represents the billing data available by default. Accounts that use Billing Conductor also have BILLING_GROUP billing views representing pro forma costs associated with each created billing group.

", + "smithy.api#documentation": "

\nCreates a billing view with the specified billing view attributes.\n
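For Soto users, a minimal sketch of what calling this operation could look like after regeneration; the SotoBilling module, the Billing client, and the generated initializers (alphabetical member order, matching Soto's usual codegen) are assumptions:

    import SotoBilling  // assumed module name

    let client = AWSClient()
    let billing = Billing(client: client, region: .useast1)

    // Create a custom billing view filtered to a single linked account.
    // Member names mirror the Smithy model above.
    let created = try await billing.createBillingView(.init(
        dataFilterExpression: .init(
            dimensions: .init(key: .linkedAccount, values: ["000000000000"])
        ),
        description: "Custom Billing View Example",
        name: "Example Custom Billing View",
        sourceViews: ["arn:aws:billing::123456789101:billingview/primary"]
    ))
    print(created.arn)  // arn:aws:billing::123456789101:billingview/custom-...
    try await client.shutdown()

Because clientToken carries smithy.api#idempotencyToken, Soto should default it to a generated UUID, so retrying the same request creates at most one view.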

", "smithy.api#examples": [ { - "title": "Invoke ListBillingViews", + "title": "Invoke CreateBillingView", "input": { - "activeTimeRange": { - "activeAfterInclusive": 1719792000, - "activeBeforeInclusive": 1.722470399999E9 + "name": "Example Custom Billing View", + "sourceViews": [ + "arn:aws:billing::123456789101:billingview/primary" + ], + "description": "Custom Billing View Example", + "dataFilterExpression": { + "dimensions": { + "key": "LINKED_ACCOUNT", + "values": [ + "000000000000" + ] + } } }, "output": { - "billingViews": [ - { - "arn": "arn:aws:billing::123456789101:billingview/primary", - "billingViewType": "PRIMARY", - "name": "Primary Billing View Account 123456789101", - "ownerAccountId": "123456789101" - } - ] + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "createdAt": 1719792001 } - }, - { - "title": "Error example for ListBillingViews", - "input": { - "activeTimeRange": { - "activeAfterInclusive": 1719792001, - "activeBeforeInclusive": 1719792000 - } - }, - "error": { - "shapeId": "com.amazonaws.billing#ValidationException", - "content": { - "message": "Failed to get billing view data for an invalid time range.", - "reason": "other" - } - }, - "allowConstraintErrors": true } ], - "smithy.api#http": { - "method": "POST", - "uri": "/", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "billingViews" - }, - "smithy.api#readonly": {}, - "smithy.test#smokeTests": [ - { - "id": "ListBillingViewsSuccess", - "params": { - "activeTimeRange": { - "activeAfterInclusive": 1719792000, - "activeBeforeInclusive": 1.722470399999E9 - } - }, - "expect": { - "success": {} - }, - "vendorParamsShape": "aws.test#AwsVendorParams", - "vendorParams": { - "region": "us-east-1" - } - } - ] + "smithy.api#idempotent": {} } }, - "com.amazonaws.billing#ListBillingViewsRequest": { + "com.amazonaws.billing#CreateBillingViewRequest": { "type": "structure", "members": { - "activeTimeRange": { - "target": "com.amazonaws.billing#ActiveTimeRange", + "name": { + "target": "com.amazonaws.billing#BillingViewName", "traits": { - "smithy.api#documentation": "

\n The time range for the billing views listed. PRIMARY billing view is always listed. BILLING_GROUP billing views are listed for time ranges when the associated billing group resource in Billing Conductor is active. The time range must be within one calendar month.\n

", + "smithy.api#documentation": "

\n The name of the billing view.\n

", "smithy.api#required": {} } }, - "maxResults": { - "target": "com.amazonaws.billing#BillingViewsMaxResults", + "description": { + "target": "com.amazonaws.billing#BillingViewDescription", "traits": { - "smithy.api#documentation": "

The maximum number of billing views to retrieve. Default is 100.\n

" + "smithy.api#documentation": "

\n The description of the billing view.\n

" } }, - "nextToken": { - "target": "com.amazonaws.billing#PageToken", + "sourceViews": { + "target": "com.amazonaws.billing#BillingViewSourceViewsList", "traits": { - "smithy.api#documentation": "

The pagination token that is used on subsequent calls to list billing views.

" + "smithy.api#documentation": "

A list of billing views used as the data source for the custom billing view.

", + "smithy.api#required": {} + } + }, + "dataFilterExpression": { + "target": "com.amazonaws.billing#Expression", + "traits": { + "smithy.api#documentation": "

\n See Expression. Billing views only support LINKED_ACCOUNT and Tags.\n

" + } + }, + "clientToken": { + "target": "com.amazonaws.billing#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you specify to ensure idempotency of the request. Idempotency ensures that an API request completes no more than one time. If the original request completes successfully, any subsequent retries complete successfully without performing any further actions.\n

", + "smithy.api#httpHeader": "X-Amzn-Client-Token", + "smithy.api#idempotencyToken": {} + } + }, + "resourceTags": { + "target": "com.amazonaws.billing#ResourceTagList", + "traits": { + "smithy.api#documentation": "

A list of tag key-value pairs to associate with the billing view being created.\n

" } } }, @@ -774,20 +902,20 @@ "smithy.api#input": {} } }, - "com.amazonaws.billing#ListBillingViewsResponse": { + "com.amazonaws.billing#CreateBillingViewResponse": { "type": "structure", "members": { - "billingViews": { - "target": "com.amazonaws.billing#BillingViewList", + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", "traits": { - "smithy.api#documentation": "

A list of BillingViewListElement retrieved.

", + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.billing#PageToken", + "createdAt": { + "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The pagination token to use on subsequent calls to list billing views.\n

" + "smithy.api#documentation": "

\n The time when the billing view was created.\n

" } } }, @@ -795,35 +923,1166 @@ "smithy.api#output": {} } }, - "com.amazonaws.billing#PageToken": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 2047 - } - } - }, - "com.amazonaws.billing#ThrottlingException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.billing#ErrorMessage", - "traits": { - "smithy.api#required": {} - } - } + "com.amazonaws.billing#DeleteBillingView": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#DeleteBillingViewRequest" }, - "traits": { - "aws.protocols#awsQueryError": { - "code": "BillingThrottling", - "httpResponseCode": 429 + "output": { + "target": "com.amazonaws.billing#DeleteBillingViewResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" }, - "smithy.api#documentation": "

The request was denied due to request throttling.\n

", - "smithy.api#error": "client", + { + "target": "com.amazonaws.billing#ConflictException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the specified billing view.

", + "smithy.api#examples": [ + { + "title": "Invoke DeleteBillingView", + "input": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + }, + "output": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + } + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.billing#DeleteBillingViewRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#DeleteBillingViewResponse": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#Dimension": { + "type": "enum", + "members": { + "LINKED_ACCOUNT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LINKED_ACCOUNT" + } + } + } + }, + "com.amazonaws.billing#DimensionValues": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.billing#Dimension", + "traits": { + "smithy.api#documentation": "

\n The name of the metadata type that you can use to filter and group your results. \n

", + "smithy.api#required": {} + } + }, + "values": { + "target": "com.amazonaws.billing#Values", + "traits": { + "smithy.api#documentation": "

\n The metadata values that you can use to filter and group your results. \n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The metadata that you can use to filter and group your results.\n

" + } + }, + "com.amazonaws.billing#ErrorMessage": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 1024 + } + } + }, + "com.amazonaws.billing#Expression": { + "type": "structure", + "members": { + "dimensions": { + "target": "com.amazonaws.billing#DimensionValues", + "traits": { + "smithy.api#documentation": "

\n The specific Dimension to use for Expression.\n

" + } + }, + "tags": { + "target": "com.amazonaws.billing#TagValues", + "traits": { + "smithy.api#documentation": "

\n The specific Tag to use for Expression.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n See Expression. Billing views only support LINKED_ACCOUNT and Tags.\n
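A sketch of the two supported filter forms, reusing the assumed generated types from the CreateBillingView sketch above:

    // Filter by linked account (the LINKED_ACCOUNT dimension).
    let accountFilter = Billing.Expression(
        dimensions: .init(key: .linkedAccount, values: ["000000000000"])
    )

    // Filter by a cost allocation tag key and its values.
    let tagFilter = Billing.Expression(
        tags: .init(key: "team", values: ["storage"])
    )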

" + } + }, + "com.amazonaws.billing#FieldName": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 100 + } + } + }, + "com.amazonaws.billing#GetBillingView": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#GetBillingViewRequest" + }, + "output": { + "target": "com.amazonaws.billing#GetBillingViewResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the metadata associated with the specified billing view ARN.\n

", + "smithy.api#examples": [ + { + "title": "Invoke GetBillingView", + "input": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + }, + "output": { + "billingView": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "name": "Example Custom Billing View", + "description": "Custom Billing View Example -- updated description", + "dataFilterExpression": { + "dimensions": { + "key": "LINKED_ACCOUNT", + "values": [ + "000000000000" + ] + } + }, + "ownerAccountId": "123456789101", + "billingViewType": "CUSTOM" + } + } + } + ], + "smithy.api#readonly": {} + } + }, + "com.amazonaws.billing#GetBillingViewRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#GetBillingViewResponse": { + "type": "structure", + "members": { + "billingView": { + "target": "com.amazonaws.billing#BillingViewElement", + "traits": { + "smithy.api#documentation": "

The billing view element associated with the specified ARN.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#GetResourcePolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#GetResourcePolicyRequest" + }, + "output": { + "target": "com.amazonaws.billing#GetResourcePolicyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the resource-based policy document attached to the resource in JSON format.\n
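A sketch of reading the policy back, reusing the assumed billing client from the CreateBillingView sketch; the ARN is illustrative:

    let policyResponse = try await billing.getResourcePolicy(.init(
        resourceArn: "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899"
    ))
    print(policyResponse.policy ?? "no policy attached")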

", + "smithy.api#examples": [ + { + "title": "Invoke GetResourcePolicy", + "input": { + "resourceArn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + }, + "output": { + "resourceArn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "policy": "{\"Version\":\"2012-10-17\",\"Id\":\"46f47cb2-a11d-43f3-983d-470b5708a899\",\"Statement\":[{\"Sid\":\"ExampleStatement1\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::000000000000:root\"},\"Action\":[\"ce:GetDimensionValues\",\"ce:GetCostAndUsageWithResources\",\"ce:GetCostAndUsage\",\"ce:GetCostForecast\",\"ce:GetTags\",\"ce:GetUsageForecast\",\"ce:GetCostCategories\",\"billing:GetBillingView\"],\"Resource\":\"arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899\"}]}" + } + } + ], + "smithy.api#readonly": {} + } + }, + "com.amazonaws.billing#GetResourcePolicyRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.billing#ResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the billing view resource to which the policy is attached.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#GetResourcePolicyResponse": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.billing#ResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the billing view resource to which the policy is attached.\n

", + "smithy.api#required": {} + } + }, + "policy": { + "target": "com.amazonaws.billing#PolicyDocument", + "traits": { + "smithy.api#documentation": "

The resource-based policy document attached to the resource in JSON format.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.billing#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "BillingInternalServer", + "httpResponseCode": 500 + }, + "smithy.api#documentation": "

The request processing failed because of an unknown error, exception, or failure.\n

", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.billing#ListBillingViews": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#ListBillingViewsRequest" + }, + "output": { + "target": "com.amazonaws.billing#ListBillingViewsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the billing views available for a given time period.\n

\n

Every Amazon Web Services account has a unique PRIMARY billing view that represents the billing data available by default. Accounts that use Billing Conductor also have BILLING_GROUP billing views representing pro forma costs associated with each created billing group.
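Because the operation carries smithy.api#paginated, Soto should also emit its usual async paginator. A sketch, assuming the regenerated listBillingViewsPaginator convenience and the billing client from the earlier sketch:

    // Page through every CUSTOM billing view visible to the caller.
    let pages = billing.listBillingViewsPaginator(.init(billingViewTypes: [.custom]))
    for try await page in pages {
        for view in page.billingViews {
            print(view.arn ?? "-", view.name ?? "-")
        }
    }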

", + "smithy.api#examples": [ + { + "title": "Invoke ListBillingViews", + "input": { + "activeTimeRange": { + "activeAfterInclusive": 1719792000, + "activeBeforeInclusive": 1.722470399999E9 + } + }, + "output": { + "billingViews": [ + { + "arn": "arn:aws:billing::123456789101:billingview/primary", + "billingViewType": "PRIMARY", + "name": "Primary Billing View Account 123456789101", + "ownerAccountId": "123456789101" + } + ] + } + }, + { + "title": "Error example for ListBillingViews", + "input": { + "activeTimeRange": { + "activeAfterInclusive": 1719792001, + "activeBeforeInclusive": 1719792000 + } + }, + "error": { + "shapeId": "com.amazonaws.billing#ValidationException", + "content": { + "message": "Failed to get billing view data for an invalid time range.", + "reason": "other" + } + }, + "allowConstraintErrors": true + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "billingViews" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "ListBillingViewsSuccess", + "params": { + "activeTimeRange": { + "activeAfterInclusive": 1719792000, + "activeBeforeInclusive": 1.722470399999E9 + } + }, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.billing#ListBillingViewsRequest": { + "type": "structure", + "members": { + "activeTimeRange": { + "target": "com.amazonaws.billing#ActiveTimeRange", + "traits": { + "smithy.api#documentation": "

\n The time range for the billing views listed. PRIMARY billing view is always listed. BILLING_GROUP billing views are listed for time ranges when the associated billing group resource in Billing Conductor is active. The time range must be within one calendar month.\n

" + } + }, + "arns": { + "target": "com.amazonaws.billing#BillingViewArnList", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Names (ARNs) that can be used to uniquely identify the billing views to list.\n

" + } + }, + "billingViewTypes": { + "target": "com.amazonaws.billing#BillingViewTypeList", + "traits": { + "smithy.api#documentation": "

The types of billing views to list.

" + } + }, + "ownerAccountId": { + "target": "com.amazonaws.billing#AccountId", + "traits": { + "smithy.api#documentation": "

\n The account ID of the owner of the billing view.\n

" + } + }, + "maxResults": { + "target": "com.amazonaws.billing#BillingViewsMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of billing views to retrieve. Default is 100.\n

" + } + }, + "nextToken": { + "target": "com.amazonaws.billing#PageToken", + "traits": { + "smithy.api#documentation": "

The pagination token that is used on subsequent calls to list billing views.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#ListBillingViewsResponse": { + "type": "structure", + "members": { + "billingViews": { + "target": "com.amazonaws.billing#BillingViewList", + "traits": { + "smithy.api#documentation": "

A list of BillingViewListElement objects retrieved.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.billing#PageToken", + "traits": { + "smithy.api#documentation": "

The pagination token to use on subsequent calls to list billing views.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#ListSourceViewsForBillingView": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#ListSourceViewsForBillingViewRequest" + }, + "output": { + "target": "com.amazonaws.billing#ListSourceViewsForBillingViewResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the source views (managed Amazon Web Services billing views) associated with the billing view.\n

", + "smithy.api#examples": [ + { + "title": "Invoke ListSourceViewsForBillingView", + "input": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + }, + "output": { + "sourceViews": [ + "arn:aws:billing::123456789101:billingview/primary" + ] + } + } + ], + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "sourceViews" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.billing#ListSourceViewsForBillingViewRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.billing#BillingViewsMaxResults", + "traits": { + "smithy.api#documentation": "

\n The maximum number of entries a paginated response can contain.\n

" + } + }, + "nextToken": { + "target": "com.amazonaws.billing#PageToken", + "traits": { + "smithy.api#documentation": "

\n The pagination token that is used on subsequent calls to list billing views.\n

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#ListSourceViewsForBillingViewResponse": { + "type": "structure", + "members": { + "sourceViews": { + "target": "com.amazonaws.billing#BillingViewSourceViewsList", + "traits": { + "smithy.api#documentation": "

A list of billing views used as the data source for the custom billing view.\n

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.billing#PageToken", + "traits": { + "smithy.api#documentation": "

\n The pagination token that is used on subsequent calls to list billing views.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.billing#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists tags associated with the billing view resource.\n

", + "smithy.api#examples": [ + { + "title": "Invoke ListTagsForResource", + "input": { + "resourceArn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + }, + "output": { + "resourceTags": [ + { + "key": "ExampleTagKey", + "value": "ExampleTagValue" + } + ] + } + } + ], + "smithy.api#readonly": {} + } + }, + "com.amazonaws.billing#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.billing#ResourceArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the resource. \n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "resourceTags": { + "target": "com.amazonaws.billing#ResourceTagList", + "traits": { + "smithy.api#documentation": "

\n A list of tag key-value pairs that are associated with the resource.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#PageToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2047 + } + } + }, + "com.amazonaws.billing#PolicyDocument": { + "type": "string" + }, + "com.amazonaws.billing#QuotaCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.billing#ResourceArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws[a-z-]*:(billing)::[0-9]{12}:[a-zA-Z0-9/:_\\+=\\.\\@-]{0,70}[a-zA-Z0-9]$" + } + }, + "com.amazonaws.billing#ResourceId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.billing#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.billing#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "com.amazonaws.billing#ResourceId", + "traits": { + "smithy.api#documentation": "

\n The ID of the resource that was not found.\n

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "com.amazonaws.billing#ResourceType", + "traits": { + "smithy.api#documentation": "

\n The type of the resource that was not found.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "BillingResourceNotFound", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "

\n The specified ARN in the request doesn't exist.\n

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.billing#ResourceTag": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.billing#ResourceTagKey", + "traits": { + "smithy.api#documentation": "

\n The key that's associated with the tag.\n

", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.billing#ResourceTagValue", + "traits": { + "smithy.api#documentation": "

\n The value that's associated with the tag.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The tag structure that contains a tag key and value.\n

" + } + }, + "com.amazonaws.billing#ResourceTagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.billing#ResourceTagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#ResourceTagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.billing#ResourceTagList": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#ResourceTag" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.billing#ResourceTagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, + "com.amazonaws.billing#ResourceType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.billing#ServiceCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.billing#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.billing#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "com.amazonaws.billing#ResourceId", + "traits": { + "smithy.api#documentation": "

\n The ID of the resource.\n

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "com.amazonaws.billing#ResourceType", + "traits": { + "smithy.api#documentation": "

\n The type of Amazon Web Services resource.\n

", + "smithy.api#required": {} + } + }, + "serviceCode": { + "target": "com.amazonaws.billing#ServiceCode", + "traits": { + "smithy.api#documentation": "

\n The container for the serviceCode.\n

", + "smithy.api#required": {} + } + }, + "quotaCode": { + "target": "com.amazonaws.billing#QuotaCode", + "traits": { + "smithy.api#documentation": "

\n The container for the quotaCode.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "BillingServiceQuotaExceeded", + "httpResponseCode": 402 + }, + "smithy.api#documentation": "

\n You've reached the limit of resources you can create, or exceeded the size of an individual resource.\n

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.billing#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#pattern": "^[\\S\\s]*$" + } + }, + "com.amazonaws.billing#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.billing#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

\n An API operation for adding one or more tags (key-value pairs) to a resource.\n
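A sketch of the tag round trip (TagResource, then UntagResource), reusing the assumed billing client from the earlier sketch; the ARN is illustrative:

    let viewArn = "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899"
    _ = try await billing.tagResource(.init(
        resourceArn: viewArn,
        resourceTags: [.init(key: "ExampleTagKey", value: "ExampleTagValue")]
    ))
    _ = try await billing.untagResource(.init(
        resourceArn: viewArn,
        resourceTagKeys: ["ExampleTagKey"]
    ))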

", + "smithy.api#examples": [ + { + "title": "Invoke TagResource", + "input": { + "resourceArn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "resourceTags": [ + { + "key": "ExampleTagKey", + "value": "ExampleTagValue" + } + ] + }, + "output": {} + } + ] + } + }, + "com.amazonaws.billing#TagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.billing#ResourceArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the resource.\n

", + "smithy.api#required": {} + } + }, + "resourceTags": { + "target": "com.amazonaws.billing#ResourceTagList", + "traits": { + "smithy.api#documentation": "

\n A list of tag key-value pairs that are associated with the resource.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#TagValues": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.billing#TagKey", + "traits": { + "smithy.api#documentation": "

\n The key for the tag.\n

", + "smithy.api#required": {} + } + }, + "values": { + "target": "com.amazonaws.billing#Values", + "traits": { + "smithy.api#documentation": "

\n The specific values of the tag.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The values that are available for a tag.\n

" + } + }, + "com.amazonaws.billing#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.billing#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "BillingThrottling", + "httpResponseCode": 429 + }, + "smithy.api#documentation": "

The request was denied due to request throttling.\n

", + "smithy.api#error": "client", "smithy.api#httpError": 429 } }, + "com.amazonaws.billing#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.billing#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

\n Removes one or more tags from a resource. Specify only tag keys in your request. Don't specify the value.\n

", + "smithy.api#examples": [ + { + "title": "Invoke UntagResource", + "input": { + "resourceArn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "resourceTagKeys": [ + "ExampleTagKey" + ] + }, + "output": {} + } + ] + } + }, + "com.amazonaws.billing#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.billing#ResourceArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the resource.\n

", + "smithy.api#required": {} + } + }, + "resourceTagKeys": { + "target": "com.amazonaws.billing#ResourceTagKeyList", + "traits": { + "smithy.api#documentation": "

\n A list of tag keys associated with the resource.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#UpdateBillingView": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#UpdateBillingViewRequest" + }, + "output": { + "target": "com.amazonaws.billing#UpdateBillingViewResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#ConflictException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the attributes of the specified billing view.\n
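A sketch of a partial update (only the description changes), reusing the assumed billing client and viewArn from the tagging sketch:

    let updated = try await billing.updateBillingView(.init(
        arn: viewArn,
        description: "Custom Billing View Example -- updated description"
    ))
    print(updated.arn)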

", + "smithy.api#examples": [ + { + "title": "Invoke UpdateBillingView", + "input": { + "name": "Example Custom Billing View", + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "description": "Custom Billing View Example -- updated description", + "dataFilterExpression": { + "dimensions": { + "key": "LINKED_ACCOUNT", + "values": [ + "000000000000" + ] + } + } + }, + "output": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "updatedAt": 1719792001 + } + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.billing#UpdateBillingViewRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.billing#BillingViewName", + "traits": { + "smithy.api#documentation": "

\n The name of the billing view.\n

" + } + }, + "description": { + "target": "com.amazonaws.billing#BillingViewDescription", + "traits": { + "smithy.api#documentation": "

\n The description of the billing view.\n

" + } + }, + "dataFilterExpression": { + "target": "com.amazonaws.billing#Expression", + "traits": { + "smithy.api#documentation": "

See Expression. Billing views only support LINKED_ACCOUNT and Tags.\n

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#UpdateBillingViewResponse": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

\n The time when the billing view was last updated.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.billing#ValidationException": { "type": "structure", "members": { @@ -913,6 +2172,28 @@ } } } + }, + "com.amazonaws.billing#Value": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#pattern": "^[\\S\\s]*$" + } + }, + "com.amazonaws.billing#Values": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#Value" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + } + } } } } \ No newline at end of file diff --git a/models/budgets.json b/models/budgets.json index 0fea2c0505..abf52523d0 100644 --- a/models/budgets.json +++ b/models/budgets.json @@ -340,6 +340,108 @@ }, "type": "endpoint" }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://budgets.c2s.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-iso-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-b" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://budgets.global.sc2s.sgov.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-isob-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { @@ -864,6 +966,28 @@ "UseDualStack": false } }, + { + "documentation": "For region aws-iso-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://budgets.c2s.ic.gov" + } + }, + "params": { + "Region": "aws-iso-global", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -903,7 +1027,16 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://budgets.us-iso-east-1.c2s.ic.gov" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://budgets.c2s.ic.gov" } }, "params": { @@ -912,6 +1045,28 @@ "UseDualStack": false } }, + { + "documentation": "For region aws-iso-b-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://budgets.global.sc2s.sgov.gov" + } + }, + "params": { + "Region": "aws-iso-b-global", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -951,7 +1106,16 @@ "documentation": "For region us-isob-east-1 with FIPS 
disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://budgets.us-isob-east-1.sc2s.sgov.gov" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://budgets.global.sc2s.sgov.gov" } }, "params": { diff --git a/models/cloudhsm-v2.json b/models/cloudhsm-v2.json index 18c34751c0..fcfcb58bb6 100644 --- a/models/cloudhsm-v2.json +++ b/models/cloudhsm-v2.json @@ -1437,6 +1437,12 @@ "smithy.api#documentation": "

The type of HSM that the cluster contains.

" } }, + "HsmTypeRollbackExpiration": { + "target": "com.amazonaws.cloudhsmv2#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp until which the cluster can be rolled back to its original HSM type.

" + } + }, "PreCoPassword": { "target": "com.amazonaws.cloudhsmv2#PreCoPassword", "traits": { @@ -1482,7 +1488,7 @@ "NetworkType": { "target": "com.amazonaws.cloudhsmv2#NetworkType", "traits": { - "smithy.api#documentation": "

The cluster's NetworkType can be set to either IPV4 (which is the default) or DUALSTACK.\n When set to IPV4, communication between your application and the Hardware Security Modules (HSMs) is restricted to the IPv4 protocol only.\n In contrast, the DUALSTACK network type enables communication over both the IPv4 and IPv6 protocols.\n To use the DUALSTACK option, you'll need to configure your Virtual Private Cloud (VPC) and subnets to support both IPv4 and IPv6. This involves adding IPv6 Classless Inter-Domain Routing (CIDR) blocks to the existing IPv4 CIDR blocks in your subnets.\n The choice between IPV4 and DUALSTACK network types determines the flexibility of the network addressing setup for your cluster. The DUALSTACK option provides more flexibility by allowing both IPv4 and IPv6 communication.

" + "smithy.api#documentation": "

The cluster's NetworkType can be IPv4 (the default) or DUALSTACK.\n The IPv4 NetworkType restricts communication between your application and the hardware security modules (HSMs) to the IPv4 protocol only. The DUALSTACK NetworkType enables communication over both IPv4 and IPv6 protocols.\n To use DUALSTACK, configure your virtual private cloud (VPC) and subnets to support both IPv4 and IPv6.\n This configuration involves adding IPv6 Classless Inter-Domain Routing (CIDR) blocks to the existing IPv4 CIDR blocks in your subnets.\n The NetworkType you choose affects the network addressing options for your cluster. DUALSTACK provides more flexibility by supporting both IPv4 and IPv6 communication.

" } }, "Certificates": { @@ -2483,6 +2489,12 @@ "smithy.api#required": {} } }, + "HsmType": { + "target": "com.amazonaws.cloudhsmv2#HsmType", + "traits": { + "smithy.api#documentation": "

The type of HSM.

" + } + }, "State": { "target": "com.amazonaws.cloudhsmv2#HsmState", "traits": { @@ -2838,11 +2850,16 @@ "com.amazonaws.cloudhsmv2#ModifyClusterRequest": { "type": "structure", "members": { + "HsmType": { + "target": "com.amazonaws.cloudhsmv2#HsmType", + "traits": { + "smithy.api#documentation": "

The desired HSM type of the cluster.
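A sketch of requesting an HSM type change and inspecting the rollback window surfaced by the new HsmTypeRollbackExpiration field; the SotoCloudHSMV2 module, the generated member names, and the "hsm2m.medium" type are assumptions:

    import SotoCloudHSMV2  // assumed module name

    let cloudHSM = CloudHSMV2(client: client, region: .useast1)
    let modified = try await cloudHSM.modifyCluster(.init(
        clusterId: "cluster-1a23b4cdefg",
        hsmType: "hsm2m.medium"
    ))
    if let rollback = modified.cluster?.hsmTypeRollbackExpiration {
        print("Rollback to the original HSM type is possible until \(rollback)")
    }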

" + } + }, "BackupRetentionPolicy": { "target": "com.amazonaws.cloudhsmv2#BackupRetentionPolicy", "traits": { - "smithy.api#documentation": "

A policy that defines how the service retains backups.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A policy that defines how the service retains backups.

" } }, "ClusterId": { diff --git a/models/cloudtrail.json b/models/cloudtrail.json index cd57d4a614..0eb96f168e 100644 --- a/models/cloudtrail.json +++ b/models/cloudtrail.json @@ -251,7 +251,7 @@ } }, "traits": { - "smithy.api#documentation": "

Advanced event selectors let you create fine-grained selectors for CloudTrail management, data, and network activity events. They help you control costs by logging only those\n events that are important to you. For more information about configuring advanced event selectors, see\n the Logging data events, Logging network activity events, and Logging management events topics in the CloudTrail User Guide.

\n

You cannot apply both event selectors and advanced event selectors to a trail.

\n

For information about configurable advanced event selector fields, see \n AdvancedEventSelector \n in the CloudTrailUser Guide.

" + "smithy.api#documentation": "

Advanced event selectors let you create fine-grained selectors for CloudTrail management, data, and network activity events. They help you control costs by logging only those\n events that are important to you. For more information about configuring advanced event selectors, see\n the Logging data events, Logging network activity events, and Logging management events topics in the CloudTrail User Guide.

\n

You cannot apply both event selectors and advanced event selectors to a trail.

\n

For information about configurable advanced event selector fields, see \n AdvancedEventSelector \n in the CloudTrail API Reference.

" } }, "com.amazonaws.cloudtrail#AdvancedEventSelectors": { @@ -266,7 +266,7 @@ "Field": { "target": "com.amazonaws.cloudtrail#SelectorField", "traits": { - "smithy.api#documentation": "

A field in a CloudTrail event record on which to filter events to be logged. For\n event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for\n selecting events as filtering is not supported.

\n

For more information, see \n AdvancedFieldSelector \n in the CloudTrailUser Guide.

", + "smithy.api#documentation": "

A field in a CloudTrail event record on which to filter events to be logged. For\n event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for\n selecting events as filtering is not supported.

\n

For more information, see \n AdvancedFieldSelector \n in the CloudTrail API Reference.

\n \n

Selectors don't support the use of wildcards like *. To match multiple values with a single condition, \n you may use StartsWith, EndsWith, NotStartsWith, or NotEndsWith to explicitly match the beginning or end of the event field.

\n
", "smithy.api#required": {} } }, @@ -810,6 +810,9 @@ { "target": "com.amazonaws.cloudtrail#RestoreEventDataStore" }, + { + "target": "com.amazonaws.cloudtrail#SearchSampleQueries" + }, { "target": "com.amazonaws.cloudtrail#StartDashboardRefresh" }, @@ -4943,7 +4946,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the settings for the Insights event selectors that you configured for your\n trail or event data store. GetInsightSelectors shows if CloudTrail Insights event logging\n is enabled on the trail or event data store, and if it is, which Insights types are enabled. If you run\n GetInsightSelectors on a trail or event data store that does not have Insights events enabled,\n the operation throws the exception InsightNotEnabledException\n

\n

Specify either the EventDataStore parameter to get Insights event selectors for an event data store, \n or the TrailName parameter to the get Insights event selectors for a trail. You cannot specify these parameters together.

\n

For more information, see Logging CloudTrail Insights events in the CloudTrail User Guide.

", + "smithy.api#documentation": "

Describes the settings for the Insights event selectors that you configured for your\n trail or event data store. GetInsightSelectors shows if CloudTrail Insights event logging\n is enabled on the trail or event data store, and if it is, which Insights types are enabled. If you run\n GetInsightSelectors on a trail or event data store that does not have Insights events enabled,\n the operation throws the exception InsightNotEnabledException\n

\n

Specify either the EventDataStore parameter to get Insights event selectors for an event data store, \n or the TrailName parameter to the get Insights event selectors for a trail. You cannot specify these parameters together.

\n

For more information, see Working with CloudTrail Insights in the CloudTrail User Guide.

", "smithy.api#idempotent": {} } }, @@ -8012,7 +8015,7 @@ } ], "traits": { - "smithy.api#documentation": "

Configures event selectors (also referred to as basic event selectors) or advanced event selectors for your trail. You can use\n either AdvancedEventSelectors or EventSelectors, but not both. If\n you apply AdvancedEventSelectors to a trail, any existing\n EventSelectors are overwritten.

\n

You can use AdvancedEventSelectors to \n log management events, data events for all resource types, and network activity events.

\n

You can use EventSelectors to log management events and data events for the following resource types:

  • AWS::DynamoDB::Table
  • AWS::Lambda::Function
  • AWS::S3::Object

You can't use EventSelectors to log network activity events.

\n

If you want your trail to log Insights events, be sure the event selector or advanced event selector enables \n logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide.\n By default, trails created without specific event selectors are configured to\n log all read and write management events, and no data events or network activity events.

\n

When an event occurs in your account, CloudTrail evaluates the event selectors or\n advanced event selectors in all trails. For each trail, if the event matches any event\n selector, the trail processes and logs the event. If the event doesn't match any event\n selector, the trail doesn't log the event.

\n

Example

  1. You create an event selector for a trail and specify that you want to log write-only events.
  2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.
  3. CloudTrail evaluates whether the events match your event selectors.
  4. The RunInstances is a write-only event and it matches your event selector. The trail logs the event.
  5. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event.

The PutEventSelectors operation must be called from the Region in which the\n trail was created; otherwise, an InvalidHomeRegionException exception is\n thrown.

\n

You can configure up to five event selectors for each trail.

\n

You can add advanced event selectors, and conditions for your advanced event selectors,\n up to a maximum of 500 values for all conditions and selectors on a trail. For more information, see\n Logging management events, Logging\n data events, Logging\n network activity events, and Quotas in CloudTrail in the CloudTrail User\n Guide.

", + "smithy.api#documentation": "

Configures event selectors (also referred to as basic event selectors) or advanced event selectors for your trail. You can use\n either AdvancedEventSelectors or EventSelectors, but not both. If\n you apply AdvancedEventSelectors to a trail, any existing\n EventSelectors are overwritten.

\n

You can use AdvancedEventSelectors to \n log management events, data events for all resource types, and network activity events.

\n

You can use EventSelectors to log management events and data events for the following resource types:

  • AWS::DynamoDB::Table
  • AWS::Lambda::Function
  • AWS::S3::Object

You can't use EventSelectors to log network activity events.

\n

If you want your trail to log Insights events, be sure the event selector or advanced event selector enables \n logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Working with CloudTrail Insights in the CloudTrail User Guide.\n By default, trails created without specific event selectors are configured to\n log all read and write management events, and no data events or network activity events.

\n

When an event occurs in your account, CloudTrail evaluates the event selectors or\n advanced event selectors in all trails. For each trail, if the event matches any event\n selector, the trail processes and logs the event. If the event doesn't match any event\n selector, the trail doesn't log the event.

\n

Example

  1. You create an event selector for a trail and specify that you want to log write-only events.
  2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.
  3. CloudTrail evaluates whether the events match your event selectors.
  4. The RunInstances is a write-only event and it matches your event selector. The trail logs the event.
  5. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event.

The PutEventSelectors operation must be called from the Region in which the\n trail was created; otherwise, an InvalidHomeRegionException exception is\n thrown.

\n

You can configure up to five event selectors for each trail.

\n

You can add advanced event selectors, and conditions for your advanced event selectors,\n up to a maximum of 500 values for all conditions and selectors on a trail. For more information, see\n Logging management events, Logging\n data events, Logging\n network activity events, and Quotas in CloudTrail in the CloudTrail User\n Guide.
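For SDK callers, the write-only example above reduces to a single call on the generated Soto client. A minimal sketch, assuming the SotoCloudTrail module and a current soto-core where `AWSClient()` uses the default credential chain and exposes an async `shutdown()`; the trail name is a placeholder:

```swift
import SotoCloudTrail

// Sketch: configure "my-trail" to log only write management events,
// so RunInstances is logged and GetConsoleOutput is not.
let client = AWSClient()
let cloudTrail = CloudTrail(client: client, region: .useast1)

let writeOnlySelector = CloudTrail.EventSelector(
    includeManagementEvents: true,
    readWriteType: .writeOnly
)

// Must be called from the trail's home Region; otherwise
// InvalidHomeRegionException is thrown.
_ = try await cloudTrail.putEventSelectors(
    eventSelectors: [writeOnlySelector],
    trailName: "my-trail"   // placeholder
)
try await client.shutdown()
```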

", "smithy.api#idempotent": {} } }, @@ -8128,7 +8131,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lets you enable Insights event logging by specifying the Insights selectors that you\n want to enable on an existing trail or event data store. You also use PutInsightSelectors to turn\n off Insights event logging, by passing an empty list of Insights types. The valid Insights\n event types are ApiErrorRateInsight and\n ApiCallRateInsight.

\n

To enable Insights on an event data store, you must specify the ARNs (or ID suffix of the ARNs) for the source event data store (EventDataStore) and the destination event data store (InsightsDestination). The source event data store logs management events and enables Insights. \n The destination event data store logs Insights events based upon the management event activity of the source event data store. The source and destination event data stores must belong to the same Amazon Web Services account.

\n

To log Insights events for a trail, you must specify the name (TrailName) of the CloudTrail trail for which you want to change or add Insights\n selectors.

\n

To log CloudTrail Insights events on API call volume, the trail or event data store\n must log write management events. To log CloudTrail\n Insights events on API error rate, the trail or event data store must log read or\n write management events. You can call GetEventSelectors on a trail \n to check whether the trail logs management events. You can call GetEventDataStore on an \n event data store to check whether the event data store logs management events.

\n

For more information, see Logging CloudTrail Insights events in the CloudTrail User Guide.

", + "smithy.api#documentation": "

Lets you enable Insights event logging by specifying the Insights selectors that you\n want to enable on an existing trail or event data store. You also use PutInsightSelectors to turn\n off Insights event logging, by passing an empty list of Insights types. The valid Insights\n event types are ApiErrorRateInsight and\n ApiCallRateInsight.

\n

To enable Insights on an event data store, you must specify the ARNs (or ID suffix of the ARNs) for the source event data store (EventDataStore) and the destination event data store (InsightsDestination). The source event data store logs management events and enables Insights. \n The destination event data store logs Insights events based upon the management event activity of the source event data store. The source and destination event data stores must belong to the same Amazon Web Services account.

\n

To log Insights events for a trail, you must specify the name (TrailName) of the CloudTrail trail for which you want to change or add Insights\n selectors.

\n

To log CloudTrail Insights events on API call volume, the trail or event data store\n must log write management events. To log CloudTrail\n Insights events on API error rate, the trail or event data store must log read or\n write management events. You can call GetEventSelectors on a trail \n to check whether the trail logs management events. You can call GetEventDataStore on an \n event data store to check whether the event data store logs management events.

\n

For more information, see Working with CloudTrail Insights in the CloudTrail User Guide.
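A hedged sketch of the trail form of this call, assuming the generated SotoCloudTrail client and its camel-cased InsightType cases; the trail name is a placeholder:

```swift
import SotoCloudTrail

// Sketch: enable both Insights types on an existing trail.
// Passing an empty insightSelectors array instead turns Insights off.
func enableInsights(_ cloudTrail: CloudTrail) async throws {
    _ = try await cloudTrail.putInsightSelectors(
        insightSelectors: [
            .init(insightType: .apiCallRateInsight),  // requires write management events
            .init(insightType: .apiErrorRateInsight)  // requires read or write management events
        ],
        trailName: "my-trail"  // placeholder
    )
}
```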

", "smithy.api#idempotent": {} } }, @@ -9241,6 +9244,150 @@ "smithy.api#documentation": "

The settings for the source S3 bucket.

" } }, + "com.amazonaws.cloudtrail#SampleQueryDescription": { + "type": "string" + }, + "com.amazonaws.cloudtrail#SampleQueryName": { + "type": "string" + }, + "com.amazonaws.cloudtrail#SampleQueryRelevance": { + "type": "float", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.cloudtrail#SampleQuerySQL": { + "type": "string" + }, + "com.amazonaws.cloudtrail#SearchSampleQueries": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#SearchSampleQueriesRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#SearchSampleQueriesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

\n Searches sample queries and returns a list of sample queries that are sorted by relevance. \n To search for sample queries, provide a natural language SearchPhrase in English.\n

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.cloudtrail#SearchSampleQueriesMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.cloudtrail#SearchSampleQueriesRequest": { + "type": "structure", + "members": { + "SearchPhrase": { + "target": "com.amazonaws.cloudtrail#SearchSampleQueriesSearchPhrase", + "traits": { + "smithy.api#documentation": "

\n The natural language phrase to use for the semantic search. The phrase must be in English. The length constraint is in characters, not words.

", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.cloudtrail#SearchSampleQueriesMaxResults", + "traits": { + "smithy.api#documentation": "

\n The maximum number of results to return on a single page. The default value is 10.\n

" + } + }, + "NextToken": { + "target": "com.amazonaws.cloudtrail#PaginationToken", + "traits": { + "smithy.api#documentation": "

\n A token you can use to get the next page of results. The length constraint is in characters, not words.\n

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.cloudtrail#SearchSampleQueriesResponse": { + "type": "structure", + "members": { + "SearchResults": { + "target": "com.amazonaws.cloudtrail#SearchSampleQueriesSearchResults", + "traits": { + "smithy.api#documentation": "

\n A list of objects containing the search results ordered from most relevant to least relevant.\n

" + } + }, + "NextToken": { + "target": "com.amazonaws.cloudtrail#PaginationToken", + "traits": { + "smithy.api#documentation": "

\n A token you can use to get the next page of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.cloudtrail#SearchSampleQueriesSearchPhrase": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 2, + "max": 1000 + }, + "smithy.api#pattern": "^[ -~\\n]*$" + } + }, + "com.amazonaws.cloudtrail#SearchSampleQueriesSearchResult": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.cloudtrail#SampleQueryName", + "traits": { + "smithy.api#documentation": "

\n The name of a sample query.\n

" + } + }, + "Description": { + "target": "com.amazonaws.cloudtrail#SampleQueryDescription", + "traits": { + "smithy.api#documentation": "

\n A longer description of a sample query.\n

" + } + }, + "SQL": { + "target": "com.amazonaws.cloudtrail#SampleQuerySQL", + "traits": { + "smithy.api#documentation": "

\n The SQL code of the sample query.\n

" + } + }, + "Relevance": { + "target": "com.amazonaws.cloudtrail#SampleQueryRelevance", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n A value between 0 and 1 indicating the similarity between the search phrase and result.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n A search result returned by the SearchSampleQueries operation.\n
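Once Soto is regenerated from this model, the new operation should surface as a `searchSampleQueries` method. A hedged sketch; the method and member names assume Soto's usual code generation, and the search phrase is illustrative:

```swift
import SotoCloudTrail

// Sketch: semantic search over sample queries, most relevant first.
func findSampleQueries(_ cloudTrail: CloudTrail) async throws {
    let response = try await cloudTrail.searchSampleQueries(
        maxResults: 10,  // also the service default
        searchPhrase: "failed console sign-in attempts"  // English, 2-1000 characters
    )
    for result in response.searchResults ?? [] {
        print(result.relevance ?? 0, result.name ?? "", result.sql ?? "")
    }
}
```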

" + } + }, + "com.amazonaws.cloudtrail#SearchSampleQueriesSearchResults": { + "type": "list", + "member": { + "target": "com.amazonaws.cloudtrail#SearchSampleQueriesSearchResult" + } + }, "com.amazonaws.cloudtrail#SelectorField": { "type": "string", "traits": { diff --git a/models/cloudwatch-logs.json b/models/cloudwatch-logs.json index d4a1f0549b..d2a383edfc 100644 --- a/models/cloudwatch-logs.json +++ b/models/cloudwatch-logs.json @@ -538,7 +538,7 @@ } ], "traits": { - "smithy.api#documentation": "

Associates the specified KMS key with either one log group in the account, or with all stored\n CloudWatch Logs query insights results in the account.

\n

When you use AssociateKmsKey, you specify either the logGroupName parameter\n or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation.

\n
  • Specify the logGroupName parameter to cause all log events stored in the log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key.

    Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.

    Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey operation with the resourceIdentifier parameter that specifies a query-result resource.

  • Specify the resourceIdentifier parameter with a query-result resource, to use that key to encrypt the stored results of all future StartQuery operations in the account. The response from a GetQueryResults operation will still return the query results in plain text.

    Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method.

    If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used.
\n \n

If you delete the key that is used to encrypt log events or log group query results,\n then all the associated stored log events or query results that were encrypted with that key \n will be unencryptable and unusable.

\n
\n \n

CloudWatch Logs supports only symmetric KMS keys. Do not associate\n an asymmetric KMS key with your log group or query results. For more information, see Using\n Symmetric and Asymmetric Keys.

\n
\n

It can take up to 5 minutes for this operation to take effect.

\n

If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an\n InvalidParameterException error.

" + "smithy.api#documentation": "

Associates the specified KMS key with either one log group in the account, or with all stored\n CloudWatch Logs query insights results in the account.

\n

When you use AssociateKmsKey, you specify either the logGroupName parameter\n or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation.

\n
  • Specify the logGroupName parameter to cause log events ingested into that log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key.

    Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.

    Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey operation with the resourceIdentifier parameter that specifies a query-result resource.

  • Specify the resourceIdentifier parameter with a query-result resource, to use that key to encrypt the stored results of all future StartQuery operations in the account. The response from a GetQueryResults operation will still return the query results in plain text.

    Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method.

    If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used.
\n \n

If you delete the key that is used to encrypt log events or log group query results,\n then all the associated stored log events or query results that were encrypted with that key \n will be unencryptable and unusable.

\n
\n \n

CloudWatch Logs supports only symmetric KMS keys. Do not associate\n an asymmetric KMS key with your log group or query results. For more information, see Using\n Symmetric and Asymmetric Keys.

\n
\n

It can take up to 5 minutes for this operation to take effect.

\n

If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an\n InvalidParameterException error.
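A minimal sketch of the two mutually exclusive forms, assuming the SotoCloudWatchLogs module; the key ARN, log group name, and account ID are placeholders:

```swift
import SotoCloudWatchLogs

func associateKeys(_ logs: CloudWatchLogs) async throws {
    let keyArn = "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"

    // Form 1: encrypt events ingested into one log group from now on.
    try await logs.associateKmsKey(kmsKeyId: keyArn, logGroupName: "my-log-group")

    // Form 2: encrypt all stored query results in the account instead.
    try await logs.associateKmsKey(
        kmsKeyId: keyArn,
        resourceIdentifier: "arn:aws:logs:us-east-1:111122223333:query-result:*"
    )
}
```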

" } }, "com.amazonaws.cloudwatchlogs#AssociateKmsKeyRequest": { @@ -985,7 +985,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an export task so that you can efficiently export data from a log group to an\n Amazon S3 bucket. When you perform a CreateExportTask operation, you must use\n credentials that have permission to write to the S3 bucket that you specify as the\n destination.

\n

Exporting log data to S3 buckets that are encrypted by KMS is supported.\n Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a\n retention period is also supported.

\n

Exporting to S3 buckets that are encrypted with AES-256 is supported.

\n

This is an asynchronous call. If all the required information is provided, this \n operation initiates an export task and responds with the ID of the task. After the task has started,\n you can use DescribeExportTasks to get the status of the export task. Each account can\n only have one active (RUNNING or PENDING) export task at a time.\n To cancel an export task, use CancelExportTask.

\n

You can export logs from multiple log groups or multiple time ranges to the same S3\n bucket. To separate log data for each export task, specify a prefix to be used as the Amazon\n S3 key prefix for all exported objects.

\n \n

Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can\n sort the exported log field data by using Linux utilities.

\n
" + "smithy.api#documentation": "

Creates an export task so that you can efficiently export data from a log group to an\n Amazon S3 bucket. When you perform a CreateExportTask operation, you must use\n credentials that have permission to write to the S3 bucket that you specify as the\n destination.

\n

Exporting log data to S3 buckets that are encrypted by KMS is supported.\n Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a\n retention period is also supported.

\n

Exporting to S3 buckets that are encrypted with AES-256 is supported.

\n

This is an asynchronous call. If all the required information is provided, this \n operation initiates an export task and responds with the ID of the task. After the task has started,\n you can use DescribeExportTasks to get the status of the export task. Each account can\n only have one active (RUNNING or PENDING) export task at a time.\n To cancel an export task, use CancelExportTask.

\n

You can export logs from multiple log groups or multiple time ranges to the same S3\n bucket. To separate log data for each export task, specify a prefix to be used as the Amazon\n S3 key prefix for all exported objects.

\n \n

We recommend that you don't regularly export to Amazon S3 as a way to continuously archive your logs. For that use case, we instead recommend that \n you use subscriptions. For more information about subscriptions, see \n Real-time processing of log data with subscriptions.

\n
\n \n

Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can\n sort the exported log field data by using Linux utilities.

\n
" } }, "com.amazonaws.cloudwatchlogs#CreateExportTaskRequest": { @@ -1493,7 +1493,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes s delivery. A delivery is a connection between a logical delivery source and a logical\n delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does\n not delete the delivery destination or the delivery source.

" + "smithy.api#documentation": "

Deletes a delivery. A delivery is a connection between a logical delivery source and a logical\n delivery destination. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does\n not delete the delivery destination or the delivery source.

" } }, "com.amazonaws.cloudwatchlogs#DeleteDeliveryDestination": { @@ -2522,7 +2522,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of all CloudWatch Logs account policies in the account.

" + "smithy.api#documentation": "

Returns a list of all CloudWatch Logs account policies in the account.

\n

To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are retrieving information for.

\n
  • To see data protection policies, you must have the logs:GetDataProtectionPolicy and logs:DescribeAccountPolicies permissions.
  • To see subscription filter policies, you must have the logs:DescribeSubscriptionFilters and logs:DescribeAccountPolicies permissions.
  • To see transformer policies, you must have the logs:GetTransformer and logs:DescribeAccountPolicies permissions.
  • To see field index policies, you must have the logs:DescribeIndexPolicies and logs:DescribeAccountPolicies permissions.
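With those permissions in place, the call itself is small. A hedged sketch, assuming the generated SotoCloudWatchLogs client and a .dataProtectionPolicy case on the generated PolicyType enum:

```swift
import SotoCloudWatchLogs

// Sketch: list the account-level data protection policies.
func listAccountPolicies(_ logs: CloudWatchLogs) async throws {
    let response = try await logs.describeAccountPolicies(policyType: .dataProtectionPolicy)
    for policy in response.accountPolicies ?? [] {
        print(policy.policyName ?? "", policy.scope?.rawValue ?? "")
    }
}
```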
" } }, "com.amazonaws.cloudwatchlogs#DescribeAccountPoliciesRequest": { @@ -3291,7 +3291,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the log streams for the specified log group. \n You can list all the log streams or filter the results by prefix.\n You can also control how the results are ordered.

\n

You can specify the log group to search by using either logGroupIdentifier or logGroupName.\n You must include one of these two parameters, but you can't include both.\n

\n

This operation has a limit of five transactions per second, after which transactions are throttled.

\n

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and \n view data from the linked source accounts. For more information, see \n CloudWatch cross-account observability.

", + "smithy.api#documentation": "

Lists the log streams for the specified log group. \n You can list all the log streams or filter the results by prefix.\n You can also control how the results are ordered.

\n

You can specify the log group to search by using either logGroupIdentifier or logGroupName.\n You must include one of these two parameters, but you can't include both.\n

\n

This operation has a limit of 25 transactions per second, after which transactions are throttled.

\n

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and \n view data from the linked source accounts. For more information, see \n CloudWatch cross-account observability.
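A hedged sketch using Soto's generated async paginator (assumed name describeLogStreamsPaginator), which fetches pages lazily; the log group identifier is a placeholder:

```swift
import SotoCloudWatchLogs

// Sketch: walk every stream of a log group, most recent activity first.
func listStreams(_ logs: CloudWatchLogs) async throws {
    let pages = logs.describeLogStreamsPaginator(
        descending: true,
        logGroupIdentifier: "my-log-group",  // or logGroupName, but not both
        orderBy: .lastEventTime
    )
    for try await page in pages {
        for stream in page.logStreams ?? [] {
            print(stream.logStreamName ?? "")
        }
    }
}
```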

", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -8752,14 +8752,14 @@ "dataSourceRoleArn": { "target": "com.amazonaws.cloudwatchlogs#Arn", "traits": { - "smithy.api#documentation": "

Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service\n collection to be able to create the dashboards. For more information about the permissions needed, see Create an IAM role to access the OpenSearch Service collection in the CloudWatch Logs User Guide.

", + "smithy.api#documentation": "

Specify the ARN of an IAM role that CloudWatch Logs will use to create the integration. This role must have the permissions necessary to access the OpenSearch Service\n collection to be able to create the dashboards. For more information about the permissions needed, see Permissions that the integration needs in the CloudWatch Logs User Guide.

", "smithy.api#required": {} } }, "dashboardViewerPrincipals": { "target": "com.amazonaws.cloudwatchlogs#DashboardViewerPrincipals", "traits": { - "smithy.api#documentation": "

Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards.

\n \n

In addition to specifying these users here, you must also grant them the CloudWatchOpenSearchDashboardsAccess \n IAM policy. For more information, see

\n
", + "smithy.api#documentation": "

Specify the ARNs of IAM roles and IAM users who you want to grant permission to for viewing the dashboards.

\n \n

In addition to specifying these users here, you must also grant them the CloudWatchOpenSearchDashboardAccess \n IAM policy. For more information, see IAM policies for users.

\n
", "smithy.api#required": {} } }, @@ -9430,7 +9430,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an account-level data protection policy, subscription filter policy, or field index policy\n that applies to all log groups \n or a subset of log groups in the account.

\n

\n Data protection policy\n

\n

A data protection policy can help safeguard sensitive \n data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only\n one account-level data protection policy.

\n \n

Sensitive data is detected and masked when it is ingested into a log group. When you set a \n data protection policy, log events ingested into the log groups before that time are not masked.

\n
\n

If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups\n and all log groups that are created later in this account. The account-level policy is applied to existing log groups\n with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

\n

By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks.\n A user who has the logs:Unmask permission can use a \n GetLogEvents or \n FilterLogEvents\n operation with the unmask parameter set to true to view the unmasked \n log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs\n console by running a CloudWatch Logs Insights query with the unmask query command.

\n

For more information, including a list of types of data that can be audited and masked, see\n Protect sensitive log data with masking.

\n

To use the PutAccountPolicy operation for a data protection policy, you must be signed on with \n the logs:PutDataProtectionPolicy\n and logs:PutAccountPolicy permissions.

\n

The PutAccountPolicy operation applies to all log groups in the account. You can use \n PutDataProtectionPolicy\n to create a data protection policy that applies to just one log group. \n If a log group has its own data protection policy and \n the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term\n specified in either policy is masked.

\n

\n Subscription filter policy\n

\n

A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services.\n Account-level subscription filter policies apply to both existing log groups and log groups that are created later in \n this account. Supported destinations are Kinesis Data Streams, Firehose, and \n Lambda. When log events are sent to the receiving service, they are Base64 encoded and \n compressed with the GZIP format.

\n

The following destinations are supported for subscription filters:

\n
  • A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
  • A Firehose data stream in the same account as the subscription policy, for same-account delivery.
  • A Lambda function in the same account as the subscription policy, for same-account delivery.
  • A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.

Each account can have one account-level subscription filter policy per Region. \n If you are updating an existing filter, you must specify the correct name in PolicyName.\n To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda \n function, you must also have the iam:PassRole permission.

\n

\n Transformer policy\n

\n

Creates or updates a log transformer policy for your account. You use log transformers to transform log events into\n a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that \n contain\n relevant, source-specific information. After you have created a transformer, \n CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during\n operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.

\n

You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, \n log stream name, account ID and Region.

\n

A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events\n ingested into this log group. For more information about the available processors to use in a transformer, see \n Processors that you can use.

\n

Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. \n CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such \n as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.

\n

You can create transformers only for the log groups in the Standard log class.

\n

You can have one account-level transformer policy that applies to all log groups in the account. \n Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with \n the selectionCriteria parameter. If you have multiple\n account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes.\n For example, if you have one policy filtered to log groups that start with my-log, you can't have another transformer\n policy filtered to my-logprod or my-logging.

\n

You can also set up a transformer at the log-group level. For more information, see \n PutTransformer. If there is both a \n log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log \n group, the log group uses only the log-group level transformer. It ignores the account-level transformer.

\n

\n Field index policy\n

\n

You can use field index policies to create indexes on fields found in \n log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference\n those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field.\n Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events.\n Common examples of indexes\n include request ID, session ID, user IDs, or instance IDs. For more information, see \n Create field indexes to improve query performance and reduce costs\n

\n

To find the fields that are in your log group events, use the \n GetLogGroupFields\n operation.

\n

For example, suppose you have created a field index for requestId. Then, any \n CloudWatch Logs Insights query on that log group that includes requestId = value\n \n or requestId in [value, value, ...] will attempt to process only the log events where\n the indexed field matches the specified value.

\n

Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field\n of RequestId won't match a log event containing requestId.

\n

You can have one account-level field index policy that applies to all log groups in the account. \n Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with \n the selectionCriteria parameter. If you have multiple\n account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes.\n For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index\n policy filtered to my-logprod or my-logging.

\n

If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only\n to the monitoring account and not to any source accounts.

\n

If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of \n PutAccountPolicy. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy\n that you create with PutAccountPolicy.

" + "smithy.api#documentation": "

Creates an account-level data protection policy, subscription filter policy, or field index policy\n that applies to all log groups \n or a subset of log groups in the account.

\n

To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are creating.

\n
  • To create a data protection policy, you must have the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.
  • To create a subscription filter policy, you must have the logs:PutSubscriptionFilter and logs:PutAccountPolicy permissions.
  • To create a transformer policy, you must have the logs:PutTransformer and logs:PutAccountPolicy permissions.
  • To create a field index policy, you must have the logs:PutIndexPolicy and logs:PutAccountPolicy permissions.

\n Data protection policy\n

\n

A data protection policy can help safeguard sensitive \n data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only\n one account-level data protection policy.

\n \n

Sensitive data is detected and masked when it is ingested into a log group. When you set a \n data protection policy, log events ingested into the log groups before that time are not masked.

\n
\n

If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups\n and all log groups that are created later in this account. The account-level policy is applied to existing log groups\n with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

\n

By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks.\n A user who has the logs:Unmask permission can use a \n GetLogEvents or \n FilterLogEvents\n operation with the unmask parameter set to true to view the unmasked \n log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs\n console by running a CloudWatch Logs Insights query with the unmask query command.

\n

For more information, including a list of types of data that can be audited and masked, see\n Protect sensitive log data with masking.

\n

To use the PutAccountPolicy operation for a data protection policy, you must be signed on with \n the logs:PutDataProtectionPolicy\n and logs:PutAccountPolicy permissions.

\n

The PutAccountPolicy operation applies to all log groups in the account. You can use \n PutDataProtectionPolicy\n to create a data protection policy that applies to just one log group. \n If a log group has its own data protection policy and \n the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term\n specified in either policy is masked.

\n

\n Subscription filter policy\n

\n

A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services.\n Account-level subscription filter policies apply to both existing log groups and log groups that are created later in \n this account. Supported destinations are Kinesis Data Streams, Firehose, and \n Lambda. When log events are sent to the receiving service, they are Base64 encoded and \n compressed with the GZIP format.

\n

The following destinations are supported for subscription filters:

\n
  • A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
  • A Firehose data stream in the same account as the subscription policy, for same-account delivery.
  • A Lambda function in the same account as the subscription policy, for same-account delivery.
  • A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.

Each account can have one account-level subscription filter policy per Region. \n If you are updating an existing filter, you must specify the correct name in PolicyName.\n To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda \n function, you must also have the iam:PassRole permission.

\n

\n Transformer policy\n

\n

Creates or updates a log transformer policy for your account. You use log transformers to transform log events into\n a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that \n contain\n relevant, source-specific information. After you have created a transformer, \n CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during\n operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.

\n

You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, \n log stream name, account ID and Region.

\n

A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events\n ingested into this log group. For more information about the available processors to use in a transformer, see \n Processors that you can use.

\n

Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. \n CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such \n as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.

\n

You can create transformers only for the log groups in the Standard log class.

\n

You can have one account-level transformer policy that applies to all log groups in the account. \n Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with \n the selectionCriteria parameter. If you have multiple\n account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes.\n For example, if you have one policy filtered to log groups that start with my-log, you can't have another transformer\n policy filtered to my-logprod or my-logging.

\n

You can also set up a transformer at the log-group level. For more information, see \n PutTransformer. If there is both a \n log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log \n group, the log group uses only the log-group level transformer. It ignores the account-level transformer.

\n

\n Field index policy\n

\n

You can use field index policies to create indexes on fields found in \n log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference\n those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field.\n Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events.\n Common examples of indexes\n include request ID, session ID, user IDs, or instance IDs. For more information, see \n Create field indexes to improve query performance and reduce costs\n

\n

To find the fields that are in your log group events, use the \n GetLogGroupFields\n operation.

\n

For example, suppose you have created a field index for requestId. Then, any \n CloudWatch Logs Insights query on that log group that includes requestId = value\n \n or requestId in [value, value, ...] will attempt to process only the log events where\n the indexed field matches the specified value.

\n

Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field\n of RequestId won't match a log event containing requestId.

\n

You can have one account-level field index policy that applies to all log groups in the account. \n Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with \n the selectionCriteria parameter. If you have multiple\n account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes.\n For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index\n policy filtered to my-logprod or my-logging.

\n

If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only\n to the monitoring account and not to any source accounts.

\n

If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of \n PutAccountPolicy. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy\n that you create with PutAccountPolicy.
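As one concrete case, a field index policy scoped by a log group name prefix might look like the following sketch. The policy document shape ({"Fields": [...]}) and the selectionCriteria syntax follow the examples above but should be treated as assumptions; all names are placeholders:

```swift
import SotoCloudWatchLogs

// Sketch: index requestId for all log groups whose names start with "my-log".
func putFieldIndexPolicy(_ logs: CloudWatchLogs) async throws {
    _ = try await logs.putAccountPolicy(
        policyDocument: #"{"Fields": ["requestId"]}"#,   // assumed document format
        policyName: "my-index-policy",
        policyType: .fieldIndexPolicy,
        selectionCriteria: #"LogGroupNamePrefix IN ["my-log"]"#  // assumed syntax
    )
}
```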

" } }, "com.amazonaws.cloudwatchlogs#PutAccountPolicyRequest": { @@ -9594,7 +9594,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an \n Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and\n Firehose are supported as logs delivery destinations.

\n

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

\n
  • Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
  • Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination.
  • If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
  • Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

\n

Only some Amazon Web Services services support being configured as a delivery source. These services are listed\n as Supported [V2 Permissions] in the table at \n Enabling \n logging from Amazon Web Services services.\n

\n

If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten\n with the new parameter values that you specify.

" + "smithy.api#documentation": "

Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an \n Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and\n Firehose are supported as logs delivery destinations.

\n

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

\n
  • Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource.
  • Use PutDeliveryDestination to create a delivery destination in the same account as the actual delivery destination. The delivery destination that you create is a logical object that represents the actual delivery destination.
  • If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
  • Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery.

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

\n

Only some Amazon Web Services services support being configured as a delivery source. These services are listed\n as Supported [V2 Permissions] in the table at \n Enabling \n logging from Amazon Web Services services.\n

\n

If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten\n with the new parameter values that you specify.
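Put together, the same-account wiring described above takes three calls. A hedged sketch, assuming the generated SotoCloudWatchLogs client; the CloudFront distribution, bucket, and names are placeholders, and ACCESS_LOGS is the log type CloudFront is documented to emit (see the logType list later in this file):

```swift
import SotoCloudWatchLogs

// Sketch: delivery source -> delivery destination -> delivery.
func wireDelivery(_ logs: CloudWatchLogs) async throws {
    _ = try await logs.putDeliverySource(
        logType: "ACCESS_LOGS",
        name: "my-delivery-source",
        resourceArn: "arn:aws:cloudfront::111122223333:distribution/EXAMPLE"
    )
    let destination = try await logs.putDeliveryDestination(
        deliveryDestinationConfiguration: .init(
            destinationResourceArn: "arn:aws:s3:::my-delivery-bucket"
        ),
        name: "my-delivery-destination"
    )
    // Pair exactly one source with one destination.
    _ = try await logs.createDelivery(
        deliveryDestinationArn: destination.deliveryDestination?.arn ?? "",
        deliverySourceName: "my-delivery-source"
    )
}
```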

" } }, "com.amazonaws.cloudwatchlogs#PutDeliveryDestinationPolicy": { @@ -9759,7 +9759,7 @@ "logType": { "target": "com.amazonaws.cloudwatchlogs#LogType", "traits": { - "smithy.api#documentation": "

Defines the type of log that the source is sending.

\n
  • For Amazon Bedrock, the valid value is APPLICATION_LOGS.
  • For Amazon CodeWhisperer, the valid value is EVENT_LOGS.
  • For IAM Identity Center, the valid value is ERROR_LOGS.
  • For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS.
", + "smithy.api#documentation": "

Defines the type of log that the source is sending.

\n
  • For Amazon Bedrock, the valid value is APPLICATION_LOGS.
  • For CloudFront, the valid value is ACCESS_LOGS.
  • For Amazon CodeWhisperer, the valid value is EVENT_LOGS.
  • For Elemental MediaPackage, the valid values are EGRESS_ACCESS_LOGS and INGRESS_ACCESS_LOGS.
  • For Elemental MediaTailor, the valid values are AD_DECISION_SERVER_LOGS, MANIFEST_SERVICE_LOGS, and TRANSCODE_LOGS.
  • For IAM Identity Center, the valid value is ERROR_LOGS.
  • For Amazon Q, the valid value is EVENT_LOGS.
  • For Amazon SES mail manager, the valid value is APPLICATION_LOG.
  • For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, WORKMAIL_MAILBOX_ACCESS_LOGS, and WORKMAIL_PERSONAL_ACCESS_TOKEN_LOGS.
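Each source resource needs one delivery source per log type it emits. A hedged sketch for Amazon WorkMail, reusing the wiring pattern shown earlier; the organization ARN and names are placeholders:

```swift
import SotoCloudWatchLogs

// Sketch: one delivery source per WorkMail log type.
func createWorkMailSources(_ logs: CloudWatchLogs) async throws {
    let organizationArn = "arn:aws:workmail:us-east-1:111122223333:organization/m-EXAMPLE"
    for logType in ["ACCESS_CONTROL_LOGS", "AUTHENTICATION_LOGS"] {
        _ = try await logs.putDeliverySource(
            logType: logType,
            name: "workmail-\(logType.lowercased())",
            resourceArn: organizationArn
        )
    }
}
```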
", "smithy.api#required": {} } }, @@ -10179,7 +10179,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates a metric filter and associates it with the specified log group. With\n metric filters, you can configure rules to extract metric data from log events ingested\n through PutLogEvents.

\n

The maximum number of metric filters that can be associated with a log group is\n 100.

\n

Using regular expressions to create metric filters is supported. For these filters, \n there is a quota of two regular expression patterns within a single filter pattern. There\n is also a quota of five regular expression patterns per log group.\n For more information about using regular expressions in metric filters, \n see \n Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

\n

When you create a metric filter, you can also optionally assign a unit and dimensions\n to the metric that is created.

\n \n

Metrics extracted from log events are charged as custom metrics.\n To prevent unexpected high charges, do not specify high-cardinality fields such as \n IPAddress or requestID as dimensions. Each different value \n found for \n a dimension is treated as a separate metric and accrues charges as a separate custom metric.\n

\n

CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for\n your specified dimensions within one hour.

\n

You can also set up a billing alarm to alert you if your charges are higher than \n expected. For more information, \n see \n Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.\n

\n
" + "smithy.api#documentation": "

Creates or updates a metric filter and associates it with the specified log group. With\n metric filters, you can configure rules to extract metric data from log events ingested\n through PutLogEvents.

\n

The maximum number of metric filters that can be associated with a log group is\n 100.

\n

Using regular expressions in filter patterns is supported. For these filters, \n there is a quota of two regular expression patterns within a single filter pattern. There\n is also a quota of five regular expression patterns per log group.\n For more information about using regular expressions in filter patterns, \n see \n Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

\n

When you create a metric filter, you can also optionally assign a unit and dimensions\n to the metric that is created.

\n \n

Metrics extracted from log events are charged as custom metrics.\n To prevent unexpected high charges, do not specify high-cardinality fields such as \n IPAddress or requestID as dimensions. Each different value \n found for \n a dimension is treated as a separate metric and accrues charges as a separate custom metric.\n

\n

CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for\n your specified dimensions within one hour.

\n

You can also set up a billing alarm to alert you if your charges are higher than \n expected. For more information, \n see \n Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.\n
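A hedged sketch that follows the dimension guidance above (one low-cardinality dimension, with its value extracted from a JSON log field); all names and the filter pattern are placeholders:

```swift
import SotoCloudWatchLogs

// Sketch: count ERROR-level events as a custom metric.
func putErrorMetricFilter(_ logs: CloudWatchLogs) async throws {
    let transformation = CloudWatchLogs.MetricTransformation(
        dimensions: ["Service": "$.service"],  // low-cardinality, not request IDs
        metricName: "ErrorCount",
        metricNamespace: "MyApp",
        metricValue: "1"   // count one per matching event
    )
    try await logs.putMetricFilter(
        filterName: "error-count",
        filterPattern: #"{ $.level = "ERROR" }"#,
        logGroupName: "my-log-group",
        metricTransformations: [transformation]
    )
}
```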

\n
" } }, "com.amazonaws.cloudwatchlogs#PutMetricFilterRequest": { @@ -10445,7 +10445,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates a subscription filter and associates it with the specified log\n group. With subscription filters, you can subscribe to a real-time stream of log events\n ingested through PutLogEvents\n and have them delivered to a specific destination. When log events are sent to the receiving\n service, they are Base64 encoded and compressed with the GZIP format.

\n

The following destinations are supported for subscription filters:

\n
  • An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
  • A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations.
  • An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
  • A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.

Each log group can have up to two subscription filters associated with it. If you are\n updating an existing filter, you must specify the correct name in filterName.\n

\n

Using regular expressions to create subscription filters is supported. For these filters, \n there is a quota of two regular expression patterns within a single filter pattern. There\n is also a quota of five regular expression patterns per log group.\n For more information about using regular expressions in subscription filters, \n see \n Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

\n

To perform a PutSubscriptionFilter operation for any destination except a Lambda function, \n you must also have the \n iam:PassRole permission.

" + "smithy.api#documentation": "

Creates or updates a subscription filter and associates it with the specified log\n group. With subscription filters, you can subscribe to a real-time stream of log events\n ingested through PutLogEvents\n and have them delivered to a specific destination. When log events are sent to the receiving\n service, they are Base64 encoded and compressed with the GZIP format.

\n

The following destinations are supported for subscription filters:

\n
  • An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
  • A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations.
  • An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
  • A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.

Each log group can have up to two subscription filters associated with it. If you are\n updating an existing filter, you must specify the correct name in filterName.\n

\n

Using regular expressions in filter patterns is supported. For these filters, \n there is a quota of two regular expression patterns within a single filter pattern. There\n is also a quota of five regular expression patterns per log group.\n For more information about using regular expressions in filter patterns, \n see \n Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

\n

To perform a PutSubscriptionFilter operation for any destination except a Lambda function, \n you must also have the \n iam:PassRole permission.
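A hedged sketch of the Kinesis Data Streams case, which is the one that needs both a delivery role and iam:PassRole on the caller; the ARNs, names, and filter pattern are placeholders:

```swift
import SotoCloudWatchLogs

// Sketch: stream matching events to a same-account Kinesis data stream.
func subscribeLogGroup(_ logs: CloudWatchLogs) async throws {
    try await logs.putSubscriptionFilter(
        destinationArn: "arn:aws:kinesis:us-east-1:111122223333:stream/my-stream",
        filterName: "error-stream",
        filterPattern: "ERROR",  // an empty pattern would match every event
        logGroupName: "my-log-group",
        roleArn: "arn:aws:iam::111122223333:role/CWLtoKinesisRole"
    )
}
```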

" } }, "com.amazonaws.cloudwatchlogs#PutSubscriptionFilterRequest": { @@ -11175,7 +11175,7 @@ "suffixPath": { "target": "com.amazonaws.cloudwatchlogs#DeliverySuffixPath", "traits": { - "smithy.api#documentation": "

This string allows re-configuring the S3 object prefix to contain either static or variable sections. The valid variables \n to use in the suffix path will vary by each log source. See ConfigurationTemplate$allowedSuffixPathFields for \n more info on what values are supported in the suffix path for each log source.

" + "smithy.api#documentation": "

This string allows re-configuring the S3 object prefix to contain either static or variable sections. The valid variables \n to use in the suffix path will vary by each log source. To find the values supported for the suffix path for each log source, \n use the DescribeConfigurationTemplates operation and check the \n allowedSuffixPathFields field in the response.

" } }, "enableHiveCompatiblePath": { @@ -11288,7 +11288,7 @@ } }, "traits": { - "smithy.api#documentation": "

his exception is returned if an unknown error occurs during a Live Tail session.

", + "smithy.api#documentation": "

This exception is returned if an unknown error occurs during a Live Tail session.

", "smithy.api#error": "client" } }, diff --git a/models/codebuild.json b/models/codebuild.json index 910db18d58..b0c1b09d97 100644 --- a/models/codebuild.json +++ b/models/codebuild.json @@ -560,6 +560,12 @@ "traits": { "smithy.api#documentation": "

An array of strings that specify the compute types that are allowed for the batch\n build. See Build environment\n compute types in the CodeBuild User Guide for these values.\n

" } + }, + "fleetsAllowed": { + "target": "com.amazonaws.codebuild#FleetsAllowed", + "traits": { + "smithy.api#documentation": "

An array of strings that specify the fleets that are allowed\n for the batch build. See Run builds on reserved capacity fleets in the CodeBuild User Guide \n for more information.
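After Soto is regenerated from this model, BatchRestrictions should gain the new fleetsAllowed member alongside the existing computeTypesAllowed. A hedged sketch of a batch configuration using it; the fleet identifier and role ARN are placeholders:

```swift
import SotoCodeBuild

// Sketch: restrict batch builds to one compute type and one reserved fleet.
func makeBatchConfig() -> CodeBuild.ProjectBuildBatchConfig {
    CodeBuild.ProjectBuildBatchConfig(
        restrictions: .init(
            computeTypesAllowed: ["BUILD_GENERAL1_SMALL"],
            fleetsAllowed: ["arn:aws:codebuild:us-east-1:111122223333:fleet/my-fleet"],
            maximumBuildsAllowed: 10
        ),
        serviceRole: "arn:aws:iam::111122223333:role/CodeBuildBatchRole",
        timeoutInMins: 60
    )
}
```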

" + } } }, "traits": { @@ -4626,6 +4632,12 @@ } } }, + "com.amazonaws.codebuild#FleetsAllowed": { + "type": "list", + "member": { + "target": "com.amazonaws.codebuild#NonEmptyString" + } + }, "com.amazonaws.codebuild#GetReportGroupTrend": { "type": "operation", "input": { @@ -6736,7 +6748,7 @@ "reportBuildStatus": { "target": "com.amazonaws.codebuild#WrapperBoolean", "traits": { - "smithy.api#documentation": "

Set to true to report the status of a build's start and finish to your source\n provider. This option is valid only when your source provider is GitHub, GitHub\n Enterprise, GitLab, GitLab Self Managed, or Bitbucket. If this is set and you use a different source provider, an\n invalidInputException is thrown.

\n

To be able to report the build status to the source provider, the user associated with the source provider must\nhave write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.

\n

The status of a build triggered by a webhook is always reported to your source\n provider.

\n

If your project's builds are triggered by a webhook, you must push a\n new commit to the repo for a change to this property to take\n effect.

" + "smithy.api#documentation": "

Set to true to report the status of a build's start and finish to your source\n provider. This option is valid only when your source provider is GitHub, GitHub\n Enterprise, GitLab, GitLab Self Managed, or Bitbucket. If this is set and you use a different source provider, an\n invalidInputException is thrown.

\n

To be able to report the build status to the source provider, the user associated with the source provider must\nhave write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.

\n

The status of a build triggered by a webhook is always reported to your source\n provider.

\n

If your project's builds are triggered by a webhook, you must push a\n new commit to the repo for a change to this property to take\n effect.

" } }, "buildStatusConfig": { @@ -8334,7 +8346,7 @@ "reportBuildStatusOverride": { "target": "com.amazonaws.codebuild#WrapperBoolean", "traits": { - "smithy.api#documentation": "

Set to true to report to your source provider the status of a build's start and\n completion. If you use this option with a source provider other than GitHub, GitHub\n Enterprise, or Bitbucket, an invalidInputException is thrown.

\n

To be able to report the build status to the source provider, the user associated with the source provider must\nhave write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.

\n \n

The status of a build triggered by a webhook is always reported to your source\n provider.

\n
" + "smithy.api#documentation": "

Set to true to report to your source provider the status of a build's start and\n completion. If you use this option with a source provider other than GitHub, GitHub\n Enterprise, GitLab, GitLab Self Managed, or Bitbucket, an invalidInputException is thrown.

\n

To be able to report the build status to the source provider, the user associated with the source provider must\nhave write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.

\n \n

The status of a build triggered by a webhook is always reported to your source\n provider.

\n
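A hedged sketch of overriding status reporting for a single build; the project name is a placeholder and `awsClient: AWSClient` is assumed to exist.

```swift
import SotoCodeBuild

// Sketch: report this one build's status back to a GitLab-backed source.
let codeBuild = CodeBuild(client: awsClient, region: .useast1)

_ = try await codeBuild.startBuild(
    projectName: "my-gitlab-project",
    reportBuildStatusOverride: true
)
```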
" } }, "buildStatusConfigOverride": { diff --git a/models/cognito-identity-provider.json b/models/cognito-identity-provider.json index 172de5b1d8..d623cbe4cd 100644 --- a/models/cognito-identity-provider.json +++ b/models/cognito-identity-provider.json @@ -569,6 +569,82 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-east-1" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-east-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-east-2" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-east-2.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-west-1" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-west-2" + ] + } + ], + "endpoint": { + "url": "https://cognito-idp-fips.us-west-2.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { @@ -673,6 +749,31 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://cognito-idp.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { @@ -717,6 +818,32 @@ }, "smithy.rules#endpointTests": { "testCases": [ + { + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.af-south-1.amazonaws.com" + } + }, + "params": { + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.ap-east-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -743,6 +870,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.ap-northeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { @@ -756,6 +896,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.ap-south-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-south-2", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -782,6 +935,32 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": 
"https://cognito-idp.ap-southeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.ap-southeast-4.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-4", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { @@ -795,6 +974,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ca-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.ca-west-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { @@ -808,6 +1000,19 @@ "UseDualStack": false } }, + { + "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.eu-central-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-central-2", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { @@ -821,6 +1026,32 @@ "UseDualStack": false } }, + { + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.eu-south-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.eu-south-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-south-2", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { @@ -860,6 +1091,32 @@ "UseDualStack": false } }, + { + "documentation": "For region il-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.il-central-1.amazonaws.com" + } + }, + "params": { + "Region": "il-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp.me-central-1.amazonaws.com" + } + }, + "params": { + "Region": "me-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { @@ -912,6 +1169,19 @@ "UseDualStack": false } }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { @@ -938,6 +1208,19 @@ "UseDualStack": false } }, + { + "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp-fips.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": 
true + } + }, { "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { @@ -964,6 +1247,19 @@ "UseDualStack": false } }, + { + "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cognito-idp-fips.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { @@ -991,14 +1287,14 @@ } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-idp-fips.us-east-1.api.aws" + "url": "https://cognito-idp-fips.us-west-2.amazonaws.com" } }, "params": { - "Region": "us-east-1", + "Region": "us-west-2", "UseFIPS": true, "UseDualStack": true } @@ -1007,7 +1303,7 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-idp.us-east-1.api.aws" + "url": "https://cognito-idp.us-east-1.amazonaws.com" } }, "params": { diff --git a/models/cognito-identity.json b/models/cognito-identity.json index e5ba8de9ed..95471907b7 100644 --- a/models/cognito-identity.json +++ b/models/cognito-identity.json @@ -297,6 +297,82 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-east-1" + ] + } + ], + "endpoint": { + "url": "https://cognito-identity-fips.us-east-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-east-2" + ] + } + ], + "endpoint": { + "url": "https://cognito-identity-fips.us-east-2.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-west-1" + ] + } + ], + "endpoint": { + "url": "https://cognito-identity-fips.us-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "us-west-2" + ] + } + ], + "endpoint": { + "url": "https://cognito-identity-fips.us-west-2.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { @@ -401,6 +477,31 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://cognito-identity.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { @@ -446,237 +547,159 @@ "smithy.rules#endpointTests": { "testCases": [ { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cognito-identity.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cognito-identity.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": false, - 
"UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cognito-identity.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cognito-identity.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cognito-identity.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cognito-identity.ca-central-1.amazonaws.com" + "url": "https://cognito-identity.us-east-1.amazonaws.com" } }, "params": { - "Region": "ca-central-1", + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cognito-identity.eu-central-1.amazonaws.com" + "url": "https://cognito-identity-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "eu-central-1", - "UseFIPS": false, + "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-identity.eu-north-1.amazonaws.com" + "url": "https://cognito-identity.us-east-1.amazonaws.com" } }, "params": { - "Region": "eu-north-1", + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-identity.eu-west-1.amazonaws.com" + "url": "https://cognito-identity-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cognito-identity.eu-west-2.amazonaws.com" + "url": "https://cognito-identity.us-east-2.amazonaws.com" } }, "params": { - "Region": "eu-west-2", + "Region": "us-east-2", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cognito-identity.eu-west-3.amazonaws.com" + "url": "https://cognito-identity-fips.us-east-2.amazonaws.com" } }, "params": { - "Region": "eu-west-3", - "UseFIPS": 
false, + "Region": "us-east-2", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-identity.me-south-1.amazonaws.com" + "url": "https://cognito-identity.us-east-2.amazonaws.com" } }, "params": { - "Region": "me-south-1", + "Region": "us-east-2", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-identity.sa-east-1.amazonaws.com" + "url": "https://cognito-identity-fips.us-east-2.amazonaws.com" } }, "params": { - "Region": "sa-east-1", - "UseFIPS": false, - "UseDualStack": false + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cognito-identity.us-east-1.amazonaws.com" + "url": "https://cognito-identity.us-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", + "Region": "us-west-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cognito-identity-fips.us-east-1.amazonaws.com" + "url": "https://cognito-identity-fips.us-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", + "Region": "us-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-identity.us-east-2.amazonaws.com" + "url": "https://cognito-identity.us-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-2", + "Region": "us-west-1", "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cognito-identity-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "Region": "us-east-2", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-identity.us-west-1.amazonaws.com" + "url": "https://cognito-identity-fips.us-west-1.amazonaws.com" } }, "params": { "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": false + "UseFIPS": true, + "UseDualStack": true } }, { @@ -706,28 +729,28 @@ } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-identity-fips.us-east-1.api.aws" + "url": "https://cognito-identity.us-west-2.amazonaws.com" } }, "params": { - "Region": "us-east-1", - "UseFIPS": true, + "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": true } }, { - 
"documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cognito-identity.us-east-1.api.aws" + "url": "https://cognito-identity-fips.us-west-2.amazonaws.com" } }, "params": { - "Region": "us-east-1", - "UseFIPS": false, + "Region": "us-west-2", + "UseFIPS": true, "UseDualStack": true } }, diff --git a/models/compute-optimizer.json b/models/compute-optimizer.json index 468b2e5208..14a5a48869 100644 --- a/models/compute-optimizer.json +++ b/models/compute-optimizer.json @@ -95,6 +95,40 @@ "smithy.api#default": 0 } }, + "com.amazonaws.computeoptimizer#AllocationStrategy": { + "type": "enum", + "members": { + "PRIORITIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Prioritized" + } + }, + "LOWEST_PRICE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LowestPrice" + } + } + } + }, + "com.amazonaws.computeoptimizer#AsgType": { + "type": "enum", + "members": { + "SINGLE_INSTANCE_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SingleInstanceType" + } + }, + "MIXED_INSTANCE_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MixedInstanceTypes" + } + } + } + }, "com.amazonaws.computeoptimizer#AutoScalingConfiguration": { "type": "enum", "members": { @@ -128,32 +162,56 @@ "target": "com.amazonaws.computeoptimizer#DesiredCapacity", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The desired capacity, or number of instances, for the Auto Scaling group.

" + "smithy.api#documentation": "

The desired capacity, or number of instances, for the EC2 Auto Scaling group.

" } }, "minSize": { "target": "com.amazonaws.computeoptimizer#MinSize", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The minimum size, or minimum number of instances, for the Auto Scaling\n group.

" + "smithy.api#documentation": "

The minimum size, or minimum number of instances, for the EC2 Auto Scaling\n group.

" } }, "maxSize": { "target": "com.amazonaws.computeoptimizer#MaxSize", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The maximum size, or maximum number of instances, for the Auto Scaling\n group.

" + "smithy.api#documentation": "

The maximum size, or maximum number of instances, for the EC2 Auto Scaling\n group.

" } }, "instanceType": { - "target": "com.amazonaws.computeoptimizer#InstanceType", + "target": "com.amazonaws.computeoptimizer#NullableInstanceType", + "traits": { + "smithy.api#documentation": "

The instance type for the EC2 Auto Scaling group.

" + } + }, + "allocationStrategy": { + "target": "com.amazonaws.computeoptimizer#AllocationStrategy", + "traits": { + "smithy.api#documentation": "

\n Describes the allocation strategy that the EC2 Auto Scaling group uses. This field is only available for EC2 Auto Scaling groups with mixed instance types.\n

" + } + }, + "estimatedInstanceHourReductionPercentage": { + "target": "com.amazonaws.computeoptimizer#NullableEstimatedInstanceHourReductionPercentage", + "traits": { + "smithy.api#documentation": "

\n Describes the projected percentage reduction in instance hours after adopting the recommended configuration. This field is only available for EC2 Auto Scaling groups with scaling policies.\n

" + } + }, + "type": { + "target": "com.amazonaws.computeoptimizer#AsgType", + "traits": { + "smithy.api#documentation": "

\n Describes whether the EC2 Auto Scaling group has a single instance type or a mixed instance type configuration.\n

" + } + }, + "mixedInstanceTypes": { + "target": "com.amazonaws.computeoptimizer#MixedInstanceTypes", "traits": { - "smithy.api#documentation": "

The instance type for the Auto Scaling group.

" + "smithy.api#documentation": "

\n Lists the instance types within an EC2 Auto Scaling group that has mixed instance types.\n

" } } }, "traits": { - "smithy.api#documentation": "

Describes the configuration of an Auto Scaling group.

" + "smithy.api#documentation": "

Describes the configuration of an EC2 Auto Scaling group.

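A hedged sketch of reading the new mixed-instance-type fields off an Auto Scaling group recommendation. Member names follow this model snapshot and may be rendered slightly differently by the code generator; `awsClient: AWSClient` is assumed to exist.

```swift
import SotoComputeOptimizer

let optimizer = ComputeOptimizer(client: awsClient, region: .useast1)

let response = try await optimizer.getAutoScalingGroupRecommendations()
for recommendation in response.autoScalingGroupRecommendations ?? [] {
    let current = recommendation.currentConfiguration
    // `type` is "SingleInstanceType" or "MixedInstanceTypes";
    // `mixedInstanceTypes` is populated only for mixed-type groups.
    print(current?.type as Any, current?.mixedInstanceTypes ?? [])
    for option in recommendation.recommendationOptions ?? [] {
        // Only present for groups with scaling policies, per the docs above.
        print(option.configuration?.estimatedInstanceHourReductionPercentage as Any)
    }
}
```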
" } }, "com.amazonaws.computeoptimizer#AutoScalingGroupEstimatedMonthlySavings": { @@ -3719,6 +3777,24 @@ "smithy.api#enumValue": "CurrentConfigurationMaxSize" } }, + "CURRENT_CONFIGURATION_ALLOCATION_STRATEGY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CurrentConfigurationAllocationStrategy" + } + }, + "CURRENT_CONFIGURATION_MIXED_INSTANCE_TYPES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CurrentConfigurationMixedInstanceTypes" + } + }, + "CURRENT_CONFIGURATION_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CurrentConfigurationType" + } + }, "CURRENT_ON_DEMAND_PRICE": { "target": "smithy.api#Unit", "traits": { @@ -3785,6 +3861,30 @@ "smithy.api#enumValue": "RecommendationOptionsConfigurationMaxSize" } }, + "RECOMMENDATION_OPTIONS_CONFIGURATION_ESTIMATED_INSTANCE_HOUR_REDUCTION_PERCENTAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RecommendationOptionsConfigurationEstimatedInstanceHourReductionPercentage" + } + }, + "RECOMMENDATION_OPTIONS_CONFIGURATION_ALLOCATION_STRATEGY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RecommendationOptionsConfigurationAllocationStrategy" + } + }, + "RECOMMENDATION_OPTIONS_CONFIGURATION_MIXED_INSTANCE_TYPES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RecommendationOptionsConfigurationMixedInstanceTypes" + } + }, + "RECOMMENDATION_OPTIONS_CONFIGURATION_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RecommendationOptionsConfigurationType" + } + }, "RECOMMENDATION_OPTIONS_PROJECTED_UTILIZATION_METRICS_CPU_MAXIMUM": { "target": "smithy.api#Unit", "traits": { @@ -9926,15 +10026,30 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.computeoptimizer#MixedInstanceType": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#MixedInstanceTypes": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#MixedInstanceType" + } + }, "com.amazonaws.computeoptimizer#NextToken": { "type": "string" }, "com.amazonaws.computeoptimizer#NullableCpu": { "type": "integer" }, + "com.amazonaws.computeoptimizer#NullableEstimatedInstanceHourReductionPercentage": { + "type": "double" + }, "com.amazonaws.computeoptimizer#NullableIOPS": { "type": "integer" }, + "com.amazonaws.computeoptimizer#NullableInstanceType": { + "type": "string" + }, "com.amazonaws.computeoptimizer#NullableMaxAllocatedStorage": { "type": "integer" }, diff --git a/models/connect.json b/models/connect.json index b7d3ffc65e..7aeb9c9d13 100644 --- a/models/connect.json +++ b/models/connect.json @@ -1057,6 +1057,9 @@ { "target": "com.amazonaws.connect#DeleteContactFlowModule" }, + { + "target": "com.amazonaws.connect#DeleteContactFlowVersion" + }, { "target": "com.amazonaws.connect#DeleteEmailAddress" }, @@ -1630,6 +1633,9 @@ { "target": "com.amazonaws.connect#UpdateInstanceStorageConfig" }, + { + "target": "com.amazonaws.connect#UpdateParticipantAuthentication" + }, { "target": "com.amazonaws.connect#UpdateParticipantRoleConfig" }, @@ -3731,7 +3737,7 @@ } ], "traits": { - "smithy.api#documentation": "

>Associates a set of proficiencies with a user.

", + "smithy.api#documentation": "

Associates a set of proficiencies with a user.

", "smithy.api#http": { "method": "POST", "uri": "/users/{InstanceId}/{UserId}/associate-proficiencies", @@ -4127,6 +4133,12 @@ "smithy.api#documentation": "

The proficiency level of the condition.

" } }, + "Range": { + "target": "com.amazonaws.connect#Range", + "traits": { + "smithy.api#documentation": "

An Object to define the minimum and maximum proficiency levels.

" + } + }, "MatchCriteria": { "target": "com.amazonaws.connect#MatchCriteria", "traits": { @@ -4224,6 +4236,28 @@ "smithy.api#default": 0 } }, + "com.amazonaws.connect#AuthenticationError": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^[\\x20-\\x21\\x23-\\x5B\\x5D-\\x7E]*$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.connect#AuthenticationErrorDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^[\\x20-\\x21\\x23-\\x5B\\x5D-\\x7E]*$", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.connect#AuthenticationProfile": { "type": "structure", "members": { @@ -4383,6 +4417,16 @@ "target": "com.amazonaws.connect#AuthenticationProfileSummary" } }, + "com.amazonaws.connect#AuthorizationCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.connect#AutoAccept": { "type": "boolean", "traits": { @@ -5656,6 +5700,12 @@ "smithy.api#documentation": "

Information about Amazon Connect Wisdom.

" } }, + "CustomerId": { + "target": "com.amazonaws.connect#CustomerId", + "traits": { + "smithy.api#documentation": "

The customer's identification number. For example, the CustomerId may be a\n customer number from your CRM. You can create a Lambda function to pull the unique customer ID of\n the caller from your CRM system. If you enable Amazon Connect Voice ID capability, this\n attribute is populated with the CustomerSpeakerId of the caller.

" + } + }, "CustomerEndpoint": { "target": "com.amazonaws.connect#EndpointInfo", "traits": { @@ -6416,6 +6466,12 @@ "traits": { "smithy.api#enumValue": "QUEUE_TRANSFER" } + }, + "CAMPAIGN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CAMPAIGN" + } } } }, @@ -7370,7 +7426,7 @@ "FlowContentSha256": { "target": "com.amazonaws.connect#FlowContentSha256", "traits": { - "smithy.api#documentation": "

Indicates the checksum value of the flow content.

" + "smithy.api#documentation": "

Indicates the checksum value of the latest published flow content.

" } } }, @@ -7410,7 +7466,7 @@ } ], "traits": { - "smithy.api#documentation": "

Publishes a new version of the flow provided. Versions are immutable and monotonically\n increasing. If a version of the same flow content already exists, no new version is created and\n instead the existing version number is returned. If the FlowContentSha256 provided\n is different from the FlowContentSha256 of the $LATEST published flow\n content, then an error is returned. This API only supports creating versions for flows of type\n Campaign.

", + "smithy.api#documentation": "

Publishes a new version of the flow provided. Versions are immutable and monotonically\n increasing. If the FlowContentSha256 provided is different from the\n FlowContentSha256 of the $LATEST published flow content, then an error\n is returned. This API only supports creating versions for flows of type\n Campaign.

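A hedged sketch of publishing a flow version from Soto; `awsClient`, `instanceId`, and `flowId` are assumed to be defined elsewhere, and only Campaign flows are supported per the doc above.

```swift
import SotoConnect

let connect = Connect(client: awsClient, region: .useast1)

let published = try await connect.createContactFlowVersion(
    contactFlowId: flowId,
    instanceId: instanceId
)
// `contactFlowVersion` is new in this model revision.
print(published.contactFlowVersion as Any, published.flowContentSha256 ?? "")
```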
", "smithy.api#http": { "method": "PUT", "uri": "/contact-flows/{InstanceId}/{ContactFlowId}/version", @@ -7449,6 +7505,12 @@ "smithy.api#documentation": "

Indicates the checksum value of the flow content.

" } }, + "ContactFlowVersion": { + "target": "com.amazonaws.connect#ResourceVersion", + "traits": { + "smithy.api#documentation": "

The identifier of the flow version.

" + } + }, "LastModifiedTime": { "target": "com.amazonaws.connect#Timestamp", "traits": { @@ -8640,7 +8702,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates registration for a device token and a chat contact to receive real-time push\n notifications. For more information about push notifications, see Set up push\n notifications in Amazon Connect for mobile chat in the Amazon Connect\n Administrator Guide.

", + "smithy.api#documentation": "

Creates registration for a device token and a chat contact to receive real-time push\n notifications. For more information about push notifications, see Set up push\n notifications in Amazon Connect for mobile chat in the Amazon Connect\n Administrator Guide.

", "smithy.api#http": { "method": "PUT", "uri": "/push-notification/{InstanceId}/registrations", @@ -10489,6 +10551,25 @@ "smithy.api#documentation": "

Information about the Customer on the contact.

" } }, + "com.amazonaws.connect#CustomerId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + } + } + }, + "com.amazonaws.connect#CustomerIdNonEmpty": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.connect#CustomerProfileAttributesSerialized": { "type": "string" }, @@ -10594,7 +10675,7 @@ "ComparisonType": { "target": "com.amazonaws.connect#DateComparisonType", "traits": { - "smithy.api#documentation": "

An object to specify the hours of operation override date condition\n comparisonType.

" + "smithy.api#documentation": "

An object to specify the hours of operation override date condition\n comparisonType.

" } } }, @@ -11046,6 +11127,82 @@ "smithy.api#output": {} } }, + "com.amazonaws.connect#DeleteContactFlowVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#DeleteContactFlowVersionRequest" + }, + "output": { + "target": "com.amazonaws.connect#DeleteContactFlowVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#AccessDeniedException" + }, + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the flow version specified by the flow version identifier.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/contact-flows/{InstanceId}/{ContactFlowId}/version/{ContactFlowVersion}", + "code": 200 + } + } + }, + "com.amazonaws.connect#DeleteContactFlowVersionRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "ContactFlowId": { + "target": "com.amazonaws.connect#ARN", + "traits": { + "smithy.api#documentation": "

The identifier of the flow.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "ContactFlowVersion": { + "target": "com.amazonaws.connect#ResourceVersion", + "traits": { + "smithy.api#documentation": "

The identifier of the flow version.

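A hedged sketch of the new operation from Soto; `awsClient`, `instanceId`, and `flowId` are assumed to exist, and version 2 is a placeholder.

```swift
import SotoConnect

// Sketch: remove one published version of a flow.
let connect = Connect(client: awsClient, region: .useast1)

_ = try await connect.deleteContactFlowVersion(
    contactFlowId: flowId,
    contactFlowVersion: 2,
    instanceId: instanceId
)
```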
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#DeleteContactFlowVersionResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#DeleteEmailAddress": { "type": "operation", "input": { @@ -11629,7 +11786,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin website.

", + "smithy.api#documentation": "

Deletes a queue.

", "smithy.api#http": { "method": "DELETE", "uri": "/queues/{InstanceId}/{QueueId}", @@ -12731,7 +12888,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the specified flow.

\n

You can also create and update flows using the Amazon Connect\n Flow language.

\n

Use the $SAVED alias in the request to describe the SAVED content\n of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. After a flow is\n published, $SAVED needs to be supplied to view saved content that has not been\n published.

\n

In the response, Status indicates the flow status as either\n SAVED or PUBLISHED. The PUBLISHED status will initiate\n validation on the content. SAVED does not initiate validation of the content.\n SAVED | PUBLISHED\n

", + "smithy.api#documentation": "

Describes the specified flow.

\n

You can also create and update flows using the Amazon Connect\n Flow language.

\n

Use the $SAVED alias in the request to describe the SAVED content\n of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. After a flow is\n published, $SAVED needs to be supplied to view saved content that has not been\n published.

\n

Use arn:aws:.../contact-flow/{id}:{version} to retrieve the content of a\n specific flow version.

\n

In the response, Status indicates the flow status as either\n SAVED or PUBLISHED. The PUBLISHED status will initiate\n validation on the content. SAVED does not initiate validation of the content.\n SAVED | PUBLISHED\n

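A hedged sketch of the two addressing modes the paragraph above describes: the `$SAVED` alias and the new `:{version}` suffix. Assumes `awsClient`, `instanceId`, and `flowId` exist elsewhere.

```swift
import SotoConnect

let connect = Connect(client: awsClient, region: .useast1)

// Unpublished (saved) content of the flow.
let saved = try await connect.describeContactFlow(
    contactFlowId: "\(flowId):$SAVED",
    instanceId: instanceId
)
// Content of a specific published version.
let version2 = try await connect.describeContactFlow(
    contactFlowId: "\(flowId):2",
    instanceId: instanceId
)
print(saved.contactFlow?.status as Any, version2.contactFlow?.status as Any)
```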
", "smithy.api#http": { "method": "GET", "uri": "/contact-flows/{InstanceId}/{ContactFlowId}", @@ -17624,6 +17781,9 @@ "traits": { "smithy.api#documentation": "

List of routing expressions which will be OR-ed together.

" } + }, + "NotAttributeCondition": { + "target": "com.amazonaws.connect#AttributeCondition" } }, "traits": { @@ -20957,6 +21117,12 @@ "traits": { "smithy.api#enumValue": "ENHANCED_CHAT_MONITORING" } + }, + "MULTI_PARTY_CHAT_CONFERENCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MULTI_PARTY_CHAT_CONFERENCE" + } } } }, @@ -21439,6 +21605,12 @@ "traits": { "smithy.api#enumValue": "CALL_TRANSFER_CONNECTOR" } + }, + "COGNITO_USER_POOL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COGNITO_USER_POOL" + } } } }, @@ -22331,7 +22503,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to returns both Amazon Lex V1 and V2 bots.

", + "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to return both Amazon Lex V1 and V2 bots.

", "smithy.api#http": { "method": "GET", "uri": "/instance/{InstanceId}/bots", @@ -31069,6 +31241,26 @@ } } }, + "com.amazonaws.connect#Range": { + "type": "structure", + "members": { + "MinProficiencyLevel": { + "target": "com.amazonaws.connect#NullableProficiencyLevel", + "traits": { + "smithy.api#documentation": "

The minimum proficiency level of the range.

" + } + }, + "MaxProficiencyLevel": { + "target": "com.amazonaws.connect#NullableProficiencyLevel", + "traits": { + "smithy.api#documentation": "

The maximum proficiency level of the range.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An Object to define the minimum and maximum proficiency levels.

" + } + }, "com.amazonaws.connect#ReadOnlyFieldInfo": { "type": "structure", "members": { @@ -36808,7 +37000,7 @@ "UploadUrlMetadata": { "target": "com.amazonaws.connect#UploadUrlMetadata", "traits": { - "smithy.api#documentation": "

Information to be used while uploading the attached file.

" + "smithy.api#documentation": "

The headers to be provided while uploading the file to the URL.

" } } }, @@ -36923,6 +37115,12 @@ "traits": { "smithy.api#documentation": "

A set of system defined key-value pairs stored on individual contact segments using an\n attribute map. The attributes are standard Amazon Connect attributes. They can be accessed in\n flows.

\n

Attribute keys can include only alphanumeric, -, and _.

\n

This field can be used to show channel subtype, such as connect:Guide.

\n \n

The types application/vnd.amazonaws.connect.message.interactive and\n application/vnd.amazonaws.connect.message.interactive.response must be present in\n the SupportedMessagingContentTypes field of this API in order to set\n SegmentAttributes as { \"connect:Subtype\": {\"valueString\" : \"connect:Guide\"\n }}.

\n
" } + }, + "CustomerId": { + "target": "com.amazonaws.connect#CustomerIdNonEmpty", + "traits": { + "smithy.api#documentation": "

The customer's identification number. For example, the CustomerId may be a\n customer number from your CRM.

" + } } }, "traits": { @@ -41814,6 +42012,90 @@ "smithy.api#input": {} } }, + "com.amazonaws.connect#UpdateParticipantAuthentication": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#UpdateParticipantAuthenticationRequest" + }, + "output": { + "target": "com.amazonaws.connect#UpdateParticipantAuthenticationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#AccessDeniedException" + }, + { + "target": "com.amazonaws.connect#ConflictException" + }, + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Instructs Amazon Connect to resume the authentication process. The subsequent actions\n depend on the request body contents:

\n
    \n
  • \n

    \n If a code is provided: Connect retrieves the identity\n information from Amazon Cognito and imports it into Connect Customer Profiles.

    \n
  • \n
  • \n

    \n If an error is provided: The error branch of the\n Authenticate Customer block is executed.

    \n
  • \n
\n \n

The API returns a success response to acknowledge the request. However, the interaction and\n exchange of identity information occur asynchronously after the response is returned.

\n
", + "smithy.api#http": { + "method": "POST", + "uri": "/contact/update-participant-authentication", + "code": 200 + } + } + }, + "com.amazonaws.connect#UpdateParticipantAuthenticationRequest": { + "type": "structure", + "members": { + "State": { + "target": "com.amazonaws.connect#ParticipantToken", + "traits": { + "smithy.api#documentation": "

The state query parameter that was provided by Cognito in the\n redirectUri. This will also match the state parameter provided in the\n AuthenticationUrl from the GetAuthenticationUrl\n response.

", + "smithy.api#required": {} + } + }, + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "smithy.api#required": {} + } + }, + "Code": { + "target": "com.amazonaws.connect#AuthorizationCode", + "traits": { + "smithy.api#documentation": "

The code query parameter provided by Cognito in the\n redirectUri.

" + } + }, + "Error": { + "target": "com.amazonaws.connect#AuthenticationError", + "traits": { + "smithy.api#documentation": "

The error query parameter provided by Cognito in the\n redirectUri.

" + } + }, + "ErrorDescription": { + "target": "com.amazonaws.connect#AuthenticationErrorDescription", + "traits": { + "smithy.api#documentation": "

The error_description parameter provided by Cognito in the\n redirectUri.

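A hedged sketch of resuming the Authenticate Customer flow after the Cognito redirect; `state` and `code` are assumed to have been read from the redirectUri query string by your web handler, and `awsClient` and `instanceId` are assumed to exist.

```swift
import SotoConnect

let connect = Connect(client: awsClient, region: .useast1)

_ = try await connect.updateParticipantAuthentication(
    code: code,            // on failure, pass `error`/`errorDescription` instead
    instanceId: instanceId,
    state: state
)
```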
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#UpdateParticipantAuthenticationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#UpdateParticipantRoleConfig": { "type": "operation", "input": { @@ -45890,7 +46172,7 @@ "IvrRecordingTrack": { "target": "com.amazonaws.connect#IvrRecordingTrack", "traits": { - "smithy.api#documentation": "

Identifies which IVR track is being recorded.

" + "smithy.api#documentation": "

Identifies which IVR track is being recorded.

\n

One and only one of the track configurations should be present in the request.

" } } }, diff --git a/models/connectparticipant.json b/models/connectparticipant.json index 2992b78527..809c6b5ce8 100644 --- a/models/connectparticipant.json +++ b/models/connectparticipant.json @@ -52,6 +52,9 @@ "type": "service", "version": "2018-09-07", "operations": [ + { + "target": "com.amazonaws.connectparticipant#CancelParticipantAuthentication" + }, { "target": "com.amazonaws.connectparticipant#CompleteAttachmentUpload" }, @@ -67,6 +70,9 @@ { "target": "com.amazonaws.connectparticipant#GetAttachment" }, + { + "target": "com.amazonaws.connectparticipant#GetAuthenticationUrl" + }, { "target": "com.amazonaws.connectparticipant#GetTranscript" }, @@ -92,7 +98,7 @@ "name": "execute-api" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

Amazon Connect is an easy-to-use omnichannel cloud contact center service that\n enables companies of any size to deliver superior customer service at a lower cost.\n Amazon Connect communications capabilities make it easy for companies to deliver\n personalized interactions across communication channels, including chat.

\n

Use the Amazon Connect Participant Service to manage participants (for example,\n agents, customers, and managers listening in), and to send messages and events within a\n chat contact. The APIs in the service enable the following: sending chat messages,\n attachment sharing, managing a participant's connection state and message events, and\n retrieving chat transcripts.

", + "smithy.api#documentation": "\n

Amazon Connect is an easy-to-use omnichannel cloud contact center service that\n enables companies of any size to deliver superior customer service at a lower cost.\n Amazon Connect communications capabilities make it easy for companies to deliver\n personalized interactions across communication channels, including chat.

\n

Use the Amazon Connect Participant Service to manage participants (for example,\n agents, customers, and managers listening in), and to send messages and events within a\n chat contact. The APIs in the service enable the following: sending chat messages,\n attachment sharing, managing a participant's connection state and message events, and\n retrieving chat transcripts.

", "smithy.api#title": "Amazon Connect Participant Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -875,9 +881,79 @@ "target": "com.amazonaws.connectparticipant#AttachmentItem" } }, + "com.amazonaws.connectparticipant#AuthenticationUrl": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2083 + } + } + }, "com.amazonaws.connectparticipant#Bool": { "type": "boolean" }, + "com.amazonaws.connectparticipant#CancelParticipantAuthentication": { + "type": "operation", + "input": { + "target": "com.amazonaws.connectparticipant#CancelParticipantAuthenticationRequest" + }, + "output": { + "target": "com.amazonaws.connectparticipant#CancelParticipantAuthenticationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connectparticipant#AccessDeniedException" + }, + { + "target": "com.amazonaws.connectparticipant#InternalServerException" + }, + { + "target": "com.amazonaws.connectparticipant#ThrottlingException" + }, + { + "target": "com.amazonaws.connectparticipant#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Cancels the authentication session. The opted out branch of the Authenticate Customer\n flow block will be taken.

\n \n

The current supported channel is chat. This API is not supported for Apple\n Messages for Business, WhatsApp, or SMS chats.

\n
", + "smithy.api#http": { + "method": "POST", + "uri": "/participant/cancel-authentication", + "code": 200 + } + } + }, + "com.amazonaws.connectparticipant#CancelParticipantAuthenticationRequest": { + "type": "structure", + "members": { + "SessionId": { + "target": "com.amazonaws.connectparticipant#SessionId", + "traits": { + "smithy.api#documentation": "

The sessionId provided in the authenticationInitiated\n event.

", + "smithy.api#required": {} + } + }, + "ConnectionToken": { + "target": "com.amazonaws.connectparticipant#ParticipantToken", + "traits": { + "smithy.api#documentation": "

The authentication token associated with the participant's connection.

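A hedged sketch of the cancel call; `awsClient`, `connectionToken`, and the `sessionId` delivered in the authenticationInitiated event are assumed to exist.

```swift
import SotoConnectParticipant

// Sketch: opt the customer out; the flow then takes the
// Authenticate Customer block's opted-out branch.
let participant = ConnectParticipant(client: awsClient, region: .useast1)

_ = try await participant.cancelParticipantAuthentication(
    connectionToken: connectionToken,
    sessionId: sessionId
)
```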
", + "smithy.api#httpHeader": "X-Amz-Bearer", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connectparticipant#CancelParticipantAuthenticationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connectparticipant#ChatContent": { "type": "string", "traits": { @@ -1020,7 +1096,7 @@ } ], "traits": { - "smithy.api#documentation": "

Allows you to confirm that the attachment has been uploaded using the pre-signed URL\n provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment\n with that identifier is already being uploaded.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Allows you to confirm that the attachment has been uploaded using the pre-signed URL\n provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment\n with that identifier is already being uploaded.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/complete-attachment-upload", @@ -1077,7 +1153,7 @@ } }, "traits": { - "smithy.api#documentation": "

The requested operation conflicts with the current state of a service\n resource associated with the request.

", + "smithy.api#documentation": "

The requested operation conflicts with the current state of a service resource\n associated with the request.

", "smithy.api#error": "client", "smithy.api#httpError": 409 } @@ -1171,7 +1247,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates the participant's connection.

\n \n

\n ParticipantToken is used for invoking this API instead of\n ConnectionToken.

\n
\n

The participant token is valid for the lifetime of the participant – until they are\n part of a contact.

\n

The response URL for WEBSOCKET Type has a connect expiry timeout of 100s.\n Clients must manually connect to the returned websocket URL and subscribe to the desired\n topic.

\n

For chat, you need to publish the following on the established websocket\n connection:

\n

\n {\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}\n

\n

Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter,\n clients need to call this API again to obtain a new websocket URL and perform the same\n steps as before.

\n

\n Message streaming support: This API can also be used\n together with the StartContactStreaming API to create a participant connection for chat\n contacts that are not using a websocket. For more information about message streaming,\n Enable real-time chat\n message streaming in the Amazon Connect Administrator\n Guide.

\n

\n Feature specifications: For information about feature\n specifications, such as the allowed number of open websocket connections per\n participant, see Feature specifications in the Amazon Connect Administrator\n Guide.

\n \n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

\n
", + "smithy.api#documentation": "

Creates the participant's connection.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ParticipantToken is used for invoking this API instead of\n ConnectionToken.

\n
\n

The participant token is valid for the lifetime of the participant – until they are\n part of a contact.

\n

The response URL for WEBSOCKET Type has a connect expiry timeout of 100s.\n Clients must manually connect to the returned websocket URL and subscribe to the desired\n topic.

\n

For chat, you need to publish the following on the established websocket\n connection:

\n

\n {\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}\n

\n

Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter,\n clients need to call this API again to obtain a new websocket URL and perform the same\n steps as before.

\n

\n Message streaming support: This API can also be used\n together with the StartContactStreaming API to create a participant connection for chat\n contacts that are not using a websocket. For more information about message streaming,\n Enable real-time chat\n message streaming in the Amazon Connect Administrator\n Guide.

\n

\n Feature specifications: For information about feature\n specifications, such as the allowed number of open websocket connections per\n participant, see Feature specifications in the Amazon Connect Administrator\n Guide.

\n \n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

\n
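A hedged sketch of the handshake described above: create the connection, then publish the `aws/subscribe` frame on the returned websocket URL with any websocket client. Assumes `awsClient` and a `participantToken` obtained from StartChatContact.

```swift
import SotoConnectParticipant

let participant = ConnectParticipant(client: awsClient, region: .useast1)

let connection = try await participant.createParticipantConnection(
    participantToken: participantToken,
    type: [.websocket, .connectionCredentials]
)
print(connection.websocket?.url ?? "no websocket URL")

// First frame to publish once the websocket is open:
let subscribeFrame = #"{"topic":"aws/subscribe","content":{"topics":["aws/chat"]}}"#
print(subscribeFrame)
```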
", "smithy.api#http": { "method": "POST", "uri": "/participant/connection", @@ -1253,7 +1329,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the view for the specified view token.

", + "smithy.api#documentation": "

Retrieves the view for the specified view token.

\n

For security recommendations, see Amazon Connect Chat security best practices.

", "smithy.api#http": { "method": "GET", "uri": "/participant/views/{ViewToken}", @@ -1322,7 +1398,7 @@ } ], "traits": { - "smithy.api#documentation": "

Disconnects a participant.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Disconnects a participant.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/disconnect", @@ -1392,7 +1468,7 @@ } ], "traits": { - "smithy.api#documentation": "

Provides a pre-signed URL for download of a completed attachment. This is an\n asynchronous API for use with active contacts.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Provides a pre-signed URL for download of a completed attachment. This is an\n asynchronous API for use with active contacts.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/attachment", @@ -1417,6 +1493,12 @@ "smithy.api#httpHeader": "X-Amz-Bearer", "smithy.api#required": {} } + }, + "UrlExpiryInSeconds": { + "target": "com.amazonaws.connectparticipant#URLExpiryInSeconds", + "traits": { + "smithy.api#documentation": "

The duration, in seconds, after which the pre-signed URL expires. The response's UrlExpiry field reports the resulting expiration time as an ISO 8601 timestamp.

" + } } }, "traits": { @@ -1437,6 +1519,89 @@ "traits": { "smithy.api#documentation": "

The expiration time of the URL in ISO timestamp. It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.

" } + }, + "AttachmentSizeInBytes": { + "target": "com.amazonaws.connectparticipant#AttachmentSizeInBytes", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

The size of the attachment in bytes.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.connectparticipant#GetAuthenticationUrl": { + "type": "operation", + "input": { + "target": "com.amazonaws.connectparticipant#GetAuthenticationUrlRequest" + }, + "output": { + "target": "com.amazonaws.connectparticipant#GetAuthenticationUrlResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connectparticipant#AccessDeniedException" + }, + { + "target": "com.amazonaws.connectparticipant#InternalServerException" + }, + { + "target": "com.amazonaws.connectparticipant#ThrottlingException" + }, + { + "target": "com.amazonaws.connectparticipant#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the AuthenticationUrl for the current authentication session for the\n AuthenticateCustomer flow block.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n
    \n
  • \n

    This API can only be called within one minute of receiving the\n authenticationInitiated event.

    \n
  • \n
  • \n

    The current supported channel is chat. This API is not supported for Apple\n Messages for Business, WhatsApp, or SMS chats.

    \n
  • \n
\n
", + "smithy.api#http": { + "method": "POST", + "uri": "/participant/authentication-url", + "code": 200 + } + } + }, + "com.amazonaws.connectparticipant#GetAuthenticationUrlRequest": { + "type": "structure", + "members": { + "SessionId": { + "target": "com.amazonaws.connectparticipant#SessionId", + "traits": { + "smithy.api#documentation": "

The sessionId provided in the authenticationInitiated event.

", + "smithy.api#required": {} + } + }, + "RedirectUri": { + "target": "com.amazonaws.connectparticipant#RedirectURI", + "traits": { + "smithy.api#documentation": "

The URL where the customer will be redirected after Amazon Cognito authorizes the\n user.

", + "smithy.api#required": {} + } + }, + "ConnectionToken": { + "target": "com.amazonaws.connectparticipant#ParticipantToken", + "traits": { + "smithy.api#documentation": "

The authentication token associated with the participant's connection.

", + "smithy.api#httpHeader": "X-Amz-Bearer", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connectparticipant#GetAuthenticationUrlResponse": { + "type": "structure", + "members": { + "AuthenticationUrl": { + "target": "com.amazonaws.connectparticipant#AuthenticationUrl", + "traits": { + "smithy.api#documentation": "

The URL where the customer will sign in to the identity provider. This URL contains\n the authorize endpoint for the Cognito UserPool used in the authentication.

" + } } }, "traits": { @@ -1466,7 +1631,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves a transcript of the session, including details about any attachments. For\n information about accessing past chat contact transcripts for a persistent chat, see\n Enable persistent chat.

\n

If you have a process that consumes events in the transcript of an chat that has ended, note that chat\n transcripts contain the following event content types if the event has occurred\n during the chat session:

\n
    \n
  • \n

    \n application/vnd.amazonaws.connect.event.participant.left\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.participant.joined\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.chat.ended\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.transfer.succeeded\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.transfer.failed\n

    \n
  • \n
\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Retrieves a transcript of the session, including details about any attachments. For\n information about accessing past chat contact transcripts for a persistent chat, see\n Enable persistent chat.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n

If you have a process that consumes events in the transcript of a chat that has\n ended, note that chat transcripts contain the following event content types if the event\n has occurred during the chat session:

\n
  • application/vnd.amazonaws.connect.event.participant.left
  • application/vnd.amazonaws.connect.event.participant.joined
  • application/vnd.amazonaws.connect.event.chat.ended
  • application/vnd.amazonaws.connect.event.transfer.succeeded
  • application/vnd.amazonaws.connect.event.transfer.failed
\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/transcript", @@ -1839,6 +2004,15 @@ "target": "com.amazonaws.connectparticipant#Receipt" } }, + "com.amazonaws.connectparticipant#RedirectURI": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, "com.amazonaws.connectparticipant#ResourceId": { "type": "string" }, @@ -1963,7 +2137,7 @@ } ], "traits": { - "smithy.api#documentation": "\n

The application/vnd.amazonaws.connect.event.connection.acknowledged\n ContentType will no longer be supported starting December 31, 2024. This event has\n been migrated to the CreateParticipantConnection API using the\n ConnectParticipant field.

\n
\n

Sends an event. Message receipts are not supported when there are more than two active\n participants in the chat. Using the SendEvent API for message receipts when a supervisor\n is barged-in will result in a conflict exception.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "\n

The application/vnd.amazonaws.connect.event.connection.acknowledged\n ContentType will no longer be supported starting December 31, 2024. This event has\n been migrated to the CreateParticipantConnection API using the\n ConnectParticipant field.

\n
\n

Sends an event. Message receipts are not supported when there are more than two active\n participants in the chat. Using the SendEvent API for message receipts when a supervisor\n is barged-in will result in a conflict exception.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/event", @@ -2050,7 +2224,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sends a message.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Sends a message.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/message", @@ -2131,6 +2305,15 @@ "smithy.api#httpError": 402 } }, + "com.amazonaws.connectparticipant#SessionId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + } + } + }, "com.amazonaws.connectparticipant#SortKey": { "type": "enum", "members": { @@ -2174,7 +2357,7 @@ } ], "traits": { - "smithy.api#documentation": "

Provides a pre-signed Amazon S3 URL in response for uploading the file directly to\n S3.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Provides a pre-signed Amazon S3 URL in response for uploading the file directly to\n S3.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/start-attachment-upload", @@ -2240,7 +2423,7 @@ "UploadMetadata": { "target": "com.amazonaws.connectparticipant#UploadMetadata", "traits": { - "smithy.api#documentation": "

Fields to be used while uploading the attachment.

" + "smithy.api#documentation": "

The headers to be provided while uploading the file to the URL.

" } } }, @@ -2297,6 +2480,15 @@ "target": "com.amazonaws.connectparticipant#Item" } }, + "com.amazonaws.connectparticipant#URLExpiryInSeconds": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 5, + "max": 300 + } + } + }, "com.amazonaws.connectparticipant#UploadMetadata": { "type": "structure", "members": { diff --git a/models/cost-explorer.json b/models/cost-explorer.json index f85925cd1f..952662ae11 100644 --- a/models/cost-explorer.json +++ b/models/cost-explorer.json @@ -1529,6 +1529,16 @@ "smithy.api#error": "client" } }, + "com.amazonaws.costexplorer#BillingViewArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[-a-zA-Z0-9/:_+=.-@]{1,43}$" + } + }, "com.amazonaws.costexplorer#CommitmentPurchaseAnalysisConfiguration": { "type": "structure", "members": { @@ -4288,6 +4298,9 @@ }, { "target": "com.amazonaws.costexplorer#RequestChangedException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -4330,6 +4343,12 @@ "smithy.api#documentation": "

You can group Amazon Web Services costs using up to two different groups, either\n dimensions, tag keys, cost categories, or any two group by types.

\n

Valid values for the DIMENSION type are AZ,\n INSTANCE_TYPE, LEGAL_ENTITY_NAME, INVOICING_ENTITY,\n LINKED_ACCOUNT, OPERATION, PLATFORM,\n PURCHASE_TYPE, SERVICE, TENANCY,\n RECORD_TYPE, and USAGE_TYPE.

\n

When you group by the TAG type and include a valid tag key, you get all\n tag values, including empty strings.

" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { @@ -4396,6 +4415,9 @@ }, { "target": "com.amazonaws.costexplorer#RequestChangedException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -4438,6 +4460,12 @@ "smithy.api#documentation": "

You can group Amazon Web Services costs using up to two different groups:\n DIMENSION, TAG, COST_CATEGORY.

" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { @@ -4504,6 +4532,9 @@ }, { "target": "com.amazonaws.costexplorer#RequestChangedException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -4537,6 +4568,12 @@ "smithy.api#documentation": "

The value that you sort the data by.

\n

The key represents the cost and usage metrics. The following values are supported:

\n
    \n
  • \n

    \n BlendedCost\n

    \n
  • \n
  • \n

    \n UnblendedCost\n

    \n
  • \n
  • \n

    \n AmortizedCost\n

    \n
  • \n
  • \n

    \n NetAmortizedCost\n

    \n
  • \n
  • \n

    \n NetUnblendedCost\n

    \n
  • \n
  • \n

    \n UsageQuantity\n

    \n
  • \n
  • \n

    \n NormalizedUsageAmount\n

    \n
  • \n
\n

The supported key values for the SortOrder value are ASCENDING\n and DESCENDING.

\n

When you use the SortBy value, the NextPageToken and\n SearchString key values aren't supported.

" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "MaxResults": { "target": "com.amazonaws.costexplorer#MaxResults", "traits": { @@ -4608,6 +4645,9 @@ }, { "target": "com.amazonaws.costexplorer#LimitExceededException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -4644,6 +4684,12 @@ "smithy.api#documentation": "

The filters that you want to use to filter your forecast. The\n GetCostForecast API supports filtering by the following dimensions:

\n
    \n
  • \n

    \n AZ\n

    \n
  • \n
  • \n

    \n INSTANCE_TYPE\n

    \n
  • \n
  • \n

    \n LINKED_ACCOUNT\n

    \n
  • \n
  • \n

    \n LINKED_ACCOUNT_NAME\n

    \n
  • \n
  • \n

    \n OPERATION\n

    \n
  • \n
  • \n

    \n PURCHASE_TYPE\n

    \n
  • \n
  • \n

    \n REGION\n

    \n
  • \n
  • \n

    \n SERVICE\n

    \n
  • \n
  • \n

    \n USAGE_TYPE\n

    \n
  • \n
  • \n

    \n USAGE_TYPE_GROUP\n

    \n
  • \n
  • \n

    \n RECORD_TYPE\n

    \n
  • \n
  • \n

    \n OPERATING_SYSTEM\n

    \n
  • \n
  • \n

    \n TENANCY\n

    \n
  • \n
  • \n

    \n SCOPE\n

    \n
  • \n
  • \n

    \n PLATFORM\n

    \n
  • \n
  • \n

    \n SUBSCRIPTION_ID\n

    \n
  • \n
  • \n

    \n LEGAL_ENTITY_NAME\n

    \n
  • \n
  • \n

    \n DEPLOYMENT_OPTION\n

    \n
  • \n
  • \n

    \n DATABASE_ENGINE\n

    \n
  • \n
  • \n

    \n INSTANCE_TYPE_FAMILY\n

    \n
  • \n
  • \n

    \n BILLING_ENTITY\n

    \n
  • \n
  • \n

    \n RESERVATION_ID\n

    \n
  • \n
  • \n

    \n SAVINGS_PLAN_ARN\n

    \n
  • \n
" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "PredictionIntervalLevel": { "target": "com.amazonaws.costexplorer#PredictionIntervalLevel", "traits": { @@ -4698,6 +4744,9 @@ }, { "target": "com.amazonaws.costexplorer#RequestChangedException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -4742,6 +4791,12 @@ "smithy.api#documentation": "

The value that you want to sort the data by.

\n

The key represents cost and usage metrics. The following values are supported:

\n
    \n
  • \n

    \n BlendedCost\n

    \n
  • \n
  • \n

    \n UnblendedCost\n

    \n
  • \n
  • \n

    \n AmortizedCost\n

    \n
  • \n
  • \n

    \n NetAmortizedCost\n

    \n
  • \n
  • \n

    \n NetUnblendedCost\n

    \n
  • \n
  • \n

    \n UsageQuantity\n

    \n
  • \n
  • \n

    \n NormalizedUsageAmount\n

    \n
  • \n
\n

The supported values for the SortOrder key are ASCENDING or\n DESCENDING.

\n

When you specify a SortBy parameter, the context must be\n COST_AND_USAGE. Further, when using SortBy,\n NextPageToken and SearchString aren't supported.

" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "MaxResults": { "target": "com.amazonaws.costexplorer#MaxResults", "traits": { @@ -5685,6 +5740,9 @@ }, { "target": "com.amazonaws.costexplorer#RequestChangedException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -5722,6 +5780,12 @@ "smithy.api#documentation": "

The value that you want to sort the data by.

\n

The key represents cost and usage metrics. The following values are supported:

\n
    \n
  • \n

    \n BlendedCost\n

    \n
  • \n
  • \n

    \n UnblendedCost\n

    \n
  • \n
  • \n

    \n AmortizedCost\n

    \n
  • \n
  • \n

    \n NetAmortizedCost\n

    \n
  • \n
  • \n

    \n NetUnblendedCost\n

    \n
  • \n
  • \n

    \n UsageQuantity\n

    \n
  • \n
  • \n

    \n NormalizedUsageAmount\n

    \n
  • \n
\n

The supported values for SortOrder are ASCENDING and\n DESCENDING.

\n

When you use SortBy, NextPageToken and SearchString\n aren't supported.

" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "MaxResults": { "target": "com.amazonaws.costexplorer#MaxResults", "traits": { @@ -5789,6 +5853,9 @@ { "target": "com.amazonaws.costexplorer#LimitExceededException" }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" + }, { "target": "com.amazonaws.costexplorer#UnresolvableUsageUnitException" } @@ -5827,6 +5894,12 @@ "smithy.api#documentation": "

The filters that you want to use to filter your forecast. The\n GetUsageForecast API supports filtering by the following dimensions:

\n
    \n
  • \n

    \n AZ\n

    \n
  • \n
  • \n

    \n INSTANCE_TYPE\n

    \n
  • \n
  • \n

    \n LINKED_ACCOUNT\n

    \n
  • \n
  • \n

    \n LINKED_ACCOUNT_NAME\n

    \n
  • \n
  • \n

    \n OPERATION\n

    \n
  • \n
  • \n

    \n PURCHASE_TYPE\n

    \n
  • \n
  • \n

    \n REGION\n

    \n
  • \n
  • \n

    \n SERVICE\n

    \n
  • \n
  • \n

    \n USAGE_TYPE\n

    \n
  • \n
  • \n

    \n USAGE_TYPE_GROUP\n

    \n
  • \n
  • \n

    \n RECORD_TYPE\n

    \n
  • \n
  • \n

    \n OPERATING_SYSTEM\n

    \n
  • \n
  • \n

    \n TENANCY\n

    \n
  • \n
  • \n

    \n SCOPE\n

    \n
  • \n
  • \n

    \n PLATFORM\n

    \n
  • \n
  • \n

    \n SUBSCRIPTION_ID\n

    \n
  • \n
  • \n

    \n LEGAL_ENTITY_NAME\n

    \n
  • \n
  • \n

    \n DEPLOYMENT_OPTION\n

    \n
  • \n
  • \n

    \n DATABASE_ENGINE\n

    \n
  • \n
  • \n

    \n INSTANCE_TYPE_FAMILY\n

    \n
  • \n
  • \n

    \n BILLING_ENTITY\n

    \n
  • \n
  • \n

    \n RESERVATION_ID\n

    \n
  • \n
  • \n

    \n SAVINGS_PLAN_ARN\n

    \n
  • \n
" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "PredictionIntervalLevel": { "target": "com.amazonaws.costexplorer#PredictionIntervalLevel", "traits": { diff --git a/models/datasync.json b/models/datasync.json index 4f914e3337..a789c12628 100644 --- a/models/datasync.json +++ b/models/datasync.json @@ -626,7 +626,7 @@ "Subdirectory": { "target": "com.amazonaws.datasync#EfsSubdirectory", "traits": { - "smithy.api#documentation": "

Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location)\n on your file system.

\n

By default, DataSync uses the root directory (or access point if you provide one by using\n AccessPointArn). You can also include subdirectories using forward slashes (for\n example, /path/to/folder).

" + "smithy.api#documentation": "

Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location).

\n

By default, DataSync uses the root directory (or access point if you provide one by using\n AccessPointArn). You can also include subdirectories using forward slashes (for\n example, /path/to/folder).

" } }, "EfsFilesystemArn": { @@ -714,27 +714,27 @@ "FsxFilesystemArn": { "target": "com.amazonaws.datasync#FsxFilesystemArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the FSx for Lustre file system.

", + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the FSx for Lustre file system.

", "smithy.api#required": {} } }, "SecurityGroupArns": { "target": "com.amazonaws.datasync#Ec2SecurityGroupArnList", "traits": { - "smithy.api#documentation": "

The Amazon Resource Names (ARNs) of the security groups that are used to configure the\n FSx for Lustre file system.

", + "smithy.api#documentation": "

Specifies the Amazon Resource Names (ARNs) of up to five security groups that provide access to your\n FSx for Lustre file system.

\n

The security groups must be able to access the file system's ports. The file system must\n also allow access from the security groups. For information about file system access, see the\n \n Amazon FSx for Lustre User Guide\n .

", "smithy.api#required": {} } }, "Subdirectory": { "target": "com.amazonaws.datasync#FsxLustreSubdirectory", "traits": { - "smithy.api#documentation": "

A subdirectory in the location's path. This subdirectory in the FSx for Lustre\n file system is used to read data from the FSx for Lustre source location or write\n data to the FSx for Lustre destination.

" + "smithy.api#documentation": "

Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories.

\n

When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory (/).

" } }, "Tags": { "target": "com.amazonaws.datasync#InputTagList", "traits": { - "smithy.api#documentation": "

The key-value pair that represents a tag that you want to add to the resource. The value\n can be an empty string. This value helps you manage, filter, and search for your resources. We\n recommend that you create a name tag for your location.

" + "smithy.api#documentation": "

Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location.

" } } }, @@ -748,7 +748,7 @@ "LocationArn": { "target": "com.amazonaws.datasync#LocationArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the FSx for Lustre file system location that's\n created.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the FSx for Lustre file system location that\n you created.

" } } }, @@ -802,7 +802,7 @@ "Subdirectory": { "target": "com.amazonaws.datasync#FsxOntapSubdirectory", "traits": { - "smithy.api#documentation": "

Specifies a path to the file share in the SVM where you'll copy your data.

\n

You can specify a junction path (also known as a mount point), qtree path (for NFS file\n shares), or share name (for SMB file shares). For example, your mount path might be\n /vol1, /vol1/tree1, or /share1.

\n \n

Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide.

\n
" + "smithy.api#documentation": "

Specifies a path to the file share in the SVM where you want to transfer data to or from.

\n

You can specify a junction path (also known as a mount point), qtree path (for NFS file\n shares), or share name (for SMB file shares). For example, your mount path might be\n /vol1, /vol1/tree1, or /share1.

\n \n

Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide.

\n
" } }, "Tags": { @@ -964,7 +964,7 @@ "Domain": { "target": "com.amazonaws.datasync#SmbDomain", "traits": { - "smithy.api#documentation": "

Specifies the name of the Microsoft Active Directory domain that the FSx for Windows File Server file system belongs to.

\n

If you have multiple Active Directory domains in your environment, configuring this\n parameter makes sure that DataSync connects to the right file system.

" + "smithy.api#documentation": "

Specifies the name of the Windows domain that the FSx for Windows File Server file system belongs to.

\n

If you have multiple Active Directory domains in your environment, configuring this\n parameter makes sure that DataSync connects to the right file system.

" } }, "Password": { @@ -1133,7 +1133,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a transfer location for a Network File System (NFS) file\n server. DataSync can use this location as a source or destination for\n transferring data.

\n

Before you begin, make sure that you understand how DataSync\n accesses\n NFS file servers.

\n \n

If you're copying data to or from an Snowcone device, you can also use\n CreateLocationNfs to create your transfer location. For more information, see\n Configuring transfers with Snowcone.

\n
" + "smithy.api#documentation": "

Creates a transfer location for a Network File System (NFS) file\n server. DataSync can use this location as a source or destination for\n transferring data.

\n

Before you begin, make sure that you understand how DataSync\n accesses\n NFS file servers.

" } }, "com.amazonaws.datasync#CreateLocationNfsRequest": { @@ -4112,6 +4112,21 @@ { "target": "com.amazonaws.datasync#UpdateLocationAzureBlob" }, + { + "target": "com.amazonaws.datasync#UpdateLocationEfs" + }, + { + "target": "com.amazonaws.datasync#UpdateLocationFsxLustre" + }, + { + "target": "com.amazonaws.datasync#UpdateLocationFsxOntap" + }, + { + "target": "com.amazonaws.datasync#UpdateLocationFsxOpenZfs" + }, + { + "target": "com.amazonaws.datasync#UpdateLocationFsxWindows" + }, { "target": "com.amazonaws.datasync#UpdateLocationHdfs" }, @@ -4121,6 +4136,9 @@ { "target": "com.amazonaws.datasync#UpdateLocationObjectStorage" }, + { + "target": "com.amazonaws.datasync#UpdateLocationS3" + }, { "target": "com.amazonaws.datasync#UpdateLocationSmb" }, @@ -5197,7 +5215,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the Network File System (NFS) protocol configuration that DataSync\n uses to access your Amazon FSx for OpenZFS or Amazon FSx for NetApp ONTAP file\n system.

" + "smithy.api#documentation": "

Specifies the Network File System (NFS) protocol configuration that DataSync\n uses to access your FSx for OpenZFS file system or FSx for ONTAP file\n system's storage virtual machine (SVM).

" } }, "com.amazonaws.datasync#FsxProtocolSmb": { @@ -5206,7 +5224,7 @@ "Domain": { "target": "com.amazonaws.datasync#SmbDomain", "traits": { - "smithy.api#documentation": "

Specifies the fully qualified domain name (FQDN) of the Microsoft Active Directory that\n your storage virtual machine (SVM) belongs to.

\n

If you have multiple domains in your environment, configuring this setting makes sure that\n DataSync connects to the right SVM.

" + "smithy.api#documentation": "

Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to.

\n

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM.

" } }, "MountOptions": { @@ -5228,7 +5246,63 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your Amazon FSx for NetApp ONTAP file system. For more information, see\n Accessing FSx for ONTAP file systems.

" + "smithy.api#documentation": "

Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your Amazon FSx for NetApp ONTAP file system's storage virtual machine (SVM). For more information, see\n Providing DataSync access to FSx for ONTAP file systems.

" + } + }, + "com.amazonaws.datasync#FsxUpdateProtocol": { + "type": "structure", + "members": { + "NFS": { + "target": "com.amazonaws.datasync#FsxProtocolNfs" + }, + "SMB": { + "target": "com.amazonaws.datasync#FsxUpdateProtocolSmb", + "traits": { + "smithy.api#documentation": "

Specifies the Server Message Block (SMB) protocol configuration that DataSync\n uses to access your FSx for ONTAP file system's storage virtual machine (SVM).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the data transfer protocol that DataSync uses to access your\n Amazon FSx file system.

\n \n

You can't update the Network File System (NFS) protocol configuration for FSx for ONTAP locations. DataSync currently only supports NFS version 3 with this location type.

\n
" + } + }, + "com.amazonaws.datasync#FsxUpdateProtocolSmb": { + "type": "structure", + "members": { + "Domain": { + "target": "com.amazonaws.datasync#FsxUpdateSmbDomain", + "traits": { + "smithy.api#documentation": "

Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to.

\n

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM.

" + } + }, + "MountOptions": { + "target": "com.amazonaws.datasync#SmbMountOptions" + }, + "Password": { + "target": "com.amazonaws.datasync#SmbPassword", + "traits": { + "smithy.api#documentation": "

Specifies the password of a user who has permission to access your SVM.

" + } + }, + "User": { + "target": "com.amazonaws.datasync#SmbUser", + "traits": { + "smithy.api#documentation": "

Specifies a user that can mount and access the files, folders, and metadata in your SVM.

\n

For information about choosing a user with the right level of access for your transfer, see Using\n the SMB protocol.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your Amazon FSx for NetApp ONTAP file system's storage virtual machine (SVM). For more information, see\n Providing DataSync access to FSx for ONTAP file systems.

" + } + }, + "com.amazonaws.datasync#FsxUpdateSmbDomain": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 253 + }, + "smithy.api#pattern": "^([A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252})?$" } }, "com.amazonaws.datasync#FsxWindowsSubdirectory": { @@ -7750,7 +7824,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that DataSync uses to access your S3 bucket.

\n

For more information, see Accessing\n S3 buckets.

" + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that DataSync uses to access your S3 bucket.

\n

For more information, see Providing DataSync access to S3 buckets.

" } }, "com.amazonaws.datasync#S3ManifestConfig": { @@ -9174,7 +9248,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync.

" + "smithy.api#documentation": "

Modifies the following configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with Azure Blob Storage.

" } }, "com.amazonaws.datasync#UpdateLocationAzureBlobRequest": { @@ -9235,6 +9309,291 @@ "smithy.api#output": {} } }, + "com.amazonaws.datasync#UpdateLocationEfs": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationEfsRequest" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationEfsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon EFS transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with Amazon EFS.

" + } + }, + "com.amazonaws.datasync#UpdateLocationEfsRequest": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the Amazon EFS transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#EfsSubdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location).

\n

By default, DataSync uses the root directory (or access point if you provide one by using\n AccessPointArn). You can also include subdirectories using forward slashes (for\n example, /path/to/folder).

" + } + }, + "AccessPointArn": { + "target": "com.amazonaws.datasync#UpdatedEfsAccessPointArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses\n to mount your Amazon EFS file system.

\n

For more information, see Accessing restricted Amazon EFS file systems.

" + } + }, + "FileSystemAccessRoleArn": { + "target": "com.amazonaws.datasync#UpdatedEfsIamRoleArn", + "traits": { + "smithy.api#documentation": "

Specifies an Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system.

\n

For information on creating this role, see Creating a DataSync IAM role for Amazon EFS file system access.

" + } + }, + "InTransitEncryption": { + "target": "com.amazonaws.datasync#EfsInTransitEncryption", + "traits": { + "smithy.api#documentation": "

Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2\n encryption when it transfers data to or from your Amazon EFS file system.

\n

If you specify an access point using AccessPointArn or an IAM\n role using FileSystemAccessRoleArn, you must set this parameter to\n TLS1_2.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationEfsResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxLustre": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationFsxLustreRequest" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationFsxLustreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon FSx for Lustre transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with FSx for Lustre.

" + } + }, + "com.amazonaws.datasync#UpdateLocationFsxLustreRequest": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the FSx for Lustre transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#SmbSubdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories.

\n

When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory (/).

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxLustreResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOntap": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationFsxOntapRequest" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationFsxOntapResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon FSx for NetApp ONTAP transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with FSx for ONTAP.

" + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOntapRequest": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the FSx for ONTAP transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Protocol": { + "target": "com.amazonaws.datasync#FsxUpdateProtocol", + "traits": { + "smithy.api#documentation": "

Specifies the data transfer protocol that DataSync uses to access your Amazon FSx file system.

" + } + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#FsxOntapSubdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a path to the file share in the storage virtual machine (SVM) where you want to transfer data to or from.

\n

You can specify a junction path (also known as a mount point), qtree path (for NFS file\n shares), or share name (for SMB file shares). For example, your mount path might be\n /vol1, /vol1/tree1, or /share1.

\n \n

Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide.

\n
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOntapResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOpenZfs": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationFsxOpenZfsRequest" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationFsxOpenZfsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon FSx for OpenZFS transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with FSx for OpenZFS.

\n \n

Request parameters related to SMB aren't supported with the\n UpdateLocationFsxOpenZfs operation.

\n
" + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOpenZfsRequest": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the FSx for OpenZFS transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Protocol": { + "target": "com.amazonaws.datasync#FsxProtocol" + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#SmbSubdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a subdirectory in the location's path that must begin with /fsx. DataSync uses this subdirectory to read or write data (depending on whether the file\n system is a source or destination location).

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOpenZfsResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxWindows": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationFsxWindowsRequest" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationFsxWindowsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon FSx for Windows File Server transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with FSx for Windows File Server.

" + } + }, + "com.amazonaws.datasync#UpdateLocationFsxWindowsRequest": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the ARN of the FSx for Windows File Server transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#FsxWindowsSubdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a mount path for your file system using forward slashes. DataSync uses this subdirectory to read or write data (depending on whether the file\n system is a source or destination location).

" + } + }, + "Domain": { + "target": "com.amazonaws.datasync#FsxUpdateSmbDomain", + "traits": { + "smithy.api#documentation": "

Specifies the name of the Windows domain that your FSx for Windows File Server file system belongs to.

\n

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.

" + } + }, + "User": { + "target": "com.amazonaws.datasync#SmbUser", + "traits": { + "smithy.api#documentation": "

Specifies the user with the permissions to mount and access the files, folders, and file\n metadata in your FSx for Windows File Server file system.

\n

For information about choosing a user with the right level of access for your transfer, see required permissions for FSx for Windows File Server locations.

" + } + }, + "Password": { + "target": "com.amazonaws.datasync#SmbPassword", + "traits": { + "smithy.api#documentation": "

Specifies the password of the user with the permissions to mount and access the files,\n folders, and file metadata in your FSx for Windows File Server file system.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxWindowsResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datasync#UpdateLocationHdfs": { "type": "operation", "input": { @@ -9252,7 +9611,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates some parameters of a previously created location for a Hadoop Distributed File\n System cluster.

" + "smithy.api#documentation": "

Modifies the following configuration parameters of the Hadoop Distributed File\n System (HDFS) transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with an HDFS cluster.

" } }, "com.amazonaws.datasync#UpdateLocationHdfsRequest": { @@ -9366,7 +9725,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies some configurations of the Network File System (NFS) transfer location that\n you're using with DataSync.

\n

For more information, see Configuring transfers to or from an\n NFS file server.

" + "smithy.api#documentation": "

Modifies the following configuration parameters of the Network File System (NFS) transfer location that you're using with DataSync.

\n

For more information, see Configuring transfers with an\n NFS file server.

" } }, "com.amazonaws.datasync#UpdateLocationNfsRequest": { @@ -9420,7 +9779,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates some parameters of an existing DataSync location for an object\n storage system.

" + "smithy.api#documentation": "

Modifies the following configuration parameters of the object storage transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with an object storage system.

" } }, "com.amazonaws.datasync#UpdateLocationObjectStorageRequest": { @@ -9487,6 +9846,63 @@ "smithy.api#output": {} } }, + "com.amazonaws.datasync#UpdateLocationS3": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationS3Request" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationS3Response" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon S3 transfer location that you're using with DataSync.

\n \n

Before you begin, make sure that you read the following topics:

\n \n
" + } + }, + "com.amazonaws.datasync#UpdateLocationS3Request": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the Amazon S3 transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#S3Subdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a prefix in the S3 bucket that DataSync reads from or writes to\n (depending on whether the bucket is a source or destination location).

\n \n

DataSync can't transfer objects with a prefix that begins with a slash\n (/) or includes //, /./, or\n /../ patterns. For example:

\n
  • /photos
  • photos//2006/January
  • photos/./2006/February
  • photos/../2006/March
\n
" + } + }, + "S3StorageClass": { + "target": "com.amazonaws.datasync#S3StorageClass", + "traits": { + "smithy.api#documentation": "

Specifies the storage class that you want your objects to use when Amazon S3 is a\n transfer destination.

\n

For buckets in Amazon Web Services Regions, the storage class defaults to\n STANDARD. For buckets on Outposts, the storage class defaults to\n OUTPOSTS.

\n

For more information, see Storage class\n considerations with Amazon S3 transfers.

" + } + }, + "S3Config": { + "target": "com.amazonaws.datasync#S3Config" + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationS3Response": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datasync#UpdateLocationSmb": { "type": "operation", "input": { @@ -9504,7 +9920,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates some of the parameters of a Server Message Block\n (SMB) file server location that you can use for DataSync transfers.

" + "smithy.api#documentation": "

Modifies the following configuration parameters of the Server Message Block\n (SMB) transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with an SMB file server.

" } }, "com.amazonaws.datasync#UpdateLocationSmbRequest": { @@ -9773,6 +10189,26 @@ "smithy.api#output": {} } }, + "com.amazonaws.datasync#UpdatedEfsAccessPointArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + }, + "smithy.api#pattern": "^(^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):elasticfilesystem:[a-z\\-0-9]+:[0-9]{12}:access-point/fsap-[0-9a-f]{8,40}$)|(^$)$" + } + }, + "com.amazonaws.datasync#UpdatedEfsIamRoleArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^(^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):iam::[0-9]{12}:role/.*$)|(^$)$" + } + }, "com.amazonaws.datasync#VerifyMode": { "type": "enum", "members": { diff --git a/models/detective.json b/models/detective.json index 68f27094e6..2d7bcd3c20 100644 --- a/models/detective.json +++ b/models/detective.json @@ -343,7 +343,7 @@ "name": "detective" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

Detective uses machine learning and purpose-built visualizations to help you to\n analyze and investigate security issues across your Amazon Web Services (Amazon Web Services) workloads. Detective automatically extracts time-based events such\n as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by\n Amazon GuardDuty.

\n

The Detective API primarily supports the creation and management of behavior\n graphs. A behavior graph contains the extracted data from a set of member accounts, and is\n created and managed by an administrator account.

\n

To add a member account to the behavior graph, the administrator account sends an\n invitation to the account. When the account accepts the invitation, it becomes a member\n account in the behavior graph.

\n

Detective is also integrated with Organizations. The organization\n management account designates the Detective administrator account for the\n organization. That account becomes the administrator account for the organization behavior\n graph. The Detective administrator account is also the delegated administrator\n account for Detective in Organizations.

\n

The Detective administrator account can enable any organization account as a\n member account in the organization behavior graph. The organization accounts do not receive\n invitations. The Detective administrator account can also invite other accounts to\n the organization behavior graph.

\n

Every behavior graph is specific to a Region. You can only use the API to manage\n behavior graphs that belong to the Region that is associated with the currently selected\n endpoint.

\n

The administrator account for a behavior graph can use the Detective API to do\n the following:

\n
    \n
  • \n

    Enable and disable Detective. Enabling Detective creates a new\n behavior graph.

    \n
  • \n
  • \n

    View the list of member accounts in a behavior graph.

    \n
  • \n
  • \n

    Add member accounts to a behavior graph.

    \n
  • \n
  • \n

    Remove member accounts from a behavior graph.

    \n
  • \n
  • \n

    Apply tags to a behavior graph.

    \n
  • \n
\n

The organization management account can use the Detective API to select the\n delegated administrator for Detective.

\n

The Detective administrator account for an organization can use the Detective API to do the following:

\n
    \n
  • \n

    Perform all of the functions of an administrator account.

    \n
  • \n
  • \n

    Determine whether to automatically enable new organization accounts as member\n accounts in the organization behavior graph.

    \n
  • \n
\n

An invited member account can use the Detective API to do the following:

\n
    \n
  • \n

    View the list of behavior graphs that they are invited to.

    \n
  • \n
  • \n

    Accept an invitation to contribute to a behavior graph.

    \n
  • \n
  • \n

    Decline an invitation to contribute to a behavior graph.

    \n
  • \n
  • \n

    Remove their account from a behavior graph.

    \n
  • \n
\n

All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.

\n \n

We replaced the term \"master account\" with the term \"administrator account\". An\n administrator account is used to centrally manage multiple accounts. In the case of\n Detective, the administrator account manages the accounts in their behavior\n graph.

\n
", + "smithy.api#documentation": "

Detective uses machine learning and purpose-built visualizations to help you to\n analyze and investigate security issues across your Amazon Web Services workloads. Detective automatically extracts time-based events such\n as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by\n Amazon GuardDuty.

\n

The Detective API primarily supports the creation and management of behavior\n graphs. A behavior graph contains the extracted data from a set of member accounts, and is\n created and managed by an administrator account.

\n

To add a member account to the behavior graph, the administrator account sends an\n invitation to the account. When the account accepts the invitation, it becomes a member\n account in the behavior graph.

\n

Detective is also integrated with Organizations. The organization\n management account designates the Detective administrator account for the\n organization. That account becomes the administrator account for the organization behavior\n graph. The Detective administrator account is also the delegated administrator\n account for Detective in Organizations.

\n

The Detective administrator account can enable any organization account as a\n member account in the organization behavior graph. The organization accounts do not receive\n invitations. The Detective administrator account can also invite other accounts to\n the organization behavior graph.

\n

Every behavior graph is specific to a Region. You can only use the API to manage\n behavior graphs that belong to the Region that is associated with the currently selected\n endpoint.

\n

The administrator account for a behavior graph can use the Detective API to do\n the following:

\n
    \n
  • \n

    Enable and disable Detective. Enabling Detective creates a new\n behavior graph.

    \n
  • \n
  • \n

    View the list of member accounts in a behavior graph.

    \n
  • \n
  • \n

    Add member accounts to a behavior graph.

    \n
  • \n
  • \n

    Remove member accounts from a behavior graph.

    \n
  • \n
  • \n

    Apply tags to a behavior graph.

    \n
  • \n
\n

The organization management account can use the Detective API to select the\n delegated administrator for Detective.

\n

The Detective administrator account for an organization can use the Detective API to do the following:

\n
    \n
  • \n

    Perform all of the functions of an administrator account.

    \n
  • \n
  • \n

    Determine whether to automatically enable new organization accounts as member\n accounts in the organization behavior graph.

    \n
  • \n
\n

An invited member account can use the Detective API to do the following:

\n
    \n
  • \n

    View the list of behavior graphs that they are invited to.

    \n
  • \n
  • \n

    Accept an invitation to contribute to a behavior graph.

    \n
  • \n
  • \n

    Decline an invitation to contribute to a behavior graph.

    \n
  • \n
  • \n

    Remove their account from a behavior graph.

    \n
  • \n
\n

All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.

\n \n

We replaced the term \"master account\" with the term \"administrator account\". An\n administrator account is used to centrally manage multiple accounts. In the case of\n Detective, the administrator account manages the accounts in their behavior\n graph.

\n
", "smithy.api#title": "Amazon Detective", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -2567,7 +2567,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details about the indicators of compromise which are used to determine if a resource is involved in a security incident. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.

" + "smithy.api#documentation": "

Details about the indicators of compromise which are used to determine if a resource is involved in a security incident. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.

" } }, "com.amazonaws.detective#IndicatorType": { @@ -2953,7 +2953,7 @@ "IndicatorType": { "target": "com.amazonaws.detective#IndicatorType", "traits": { - "smithy.api#documentation": "

For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.

" + "smithy.api#documentation": "

For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.

" } }, "NextToken": { @@ -4472,7 +4472,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a data source packages for the behavior graph.

", + "smithy.api#documentation": "

Starts a data source package for the Detective behavior graph.

", "smithy.api#http": { "method": "POST", "uri": "/graph/datasources/update", @@ -4493,7 +4493,7 @@ "DatasourcePackages": { "target": "com.amazonaws.detective#DatasourcePackageList", "traits": { - "smithy.api#documentation": "

The data source package start for the behavior graph.

", + "smithy.api#documentation": "

The data source package to start for the behavior graph.

", "smithy.api#required": {} } } diff --git a/models/docdb.json b/models/docdb.json index bad56a44ce..cd716554b9 100644 --- a/models/docdb.json +++ b/models/docdb.json @@ -1636,6 +1636,32 @@ "smithy.api#documentation": "

The configuration setting for the log types to be enabled for export to Amazon\n CloudWatch Logs for a specific instance or cluster.

\n

The EnableLogTypes and DisableLogTypes arrays determine\n which logs are exported (or not exported) to CloudWatch Logs. The values within these\n arrays depend on the engine that is being used.

" } }, + "com.amazonaws.docdb#ClusterMasterUserSecret": { + "type": "structure", + "members": { + "SecretArn": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret.

" + } + }, + "SecretStatus": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

The status of the secret.

The possible status values include the following:

  • creating - The secret is being created.
  • active - The secret is available for normal use and rotation.
  • rotating - The secret is being rotated.
  • impaired - The secret can be used to access database credentials, but it can't be rotated. A secret might have this status if, for example, permissions are changed so that Amazon DocumentDB can no longer access either the secret or the KMS key for the secret.

    When a secret has this status, you can correct the condition that caused the status. Alternatively, modify the instance to turn off automatic management of database credentials, and then modify the instance again to turn on automatic management of database credentials.

" + } + }, + "KmsKeyId": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier that is used to encrypt the secret.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the secret managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the master user password.

" + } + }, "com.amazonaws.docdb#CopyDBClusterParameterGroup": { "type": "operation", "input": { @@ -1998,6 +2024,18 @@ "traits": { "smithy.api#documentation": "

The storage type to associate with the DB cluster.

\n

For information on storage types for Amazon DocumentDB clusters, see \n Cluster storage configurations in the Amazon DocumentDB Developer Guide.

\n

Valid values for storage type - standard | iopt1\n

\n

Default value is standard \n

\n \n

When you create a DocumentDB DB cluster with the storage type set to iopt1, the storage type is returned\n in the response. The storage type isn't returned when you set it to standard.

\n
" } + }, + "ManageMasterUserPassword": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.

\n

Constraint: You can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified.

" + } + }, + "MasterUserSecretKmsKeyId": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.\n This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the DB cluster.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. \n To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

\n

If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. \n If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

\n

There is a default KMS key for your Amazon Web Services account. \n Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.
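A minimal Soto sketch of how these new CreateDBCluster fields might be used; the cluster identifier and user name are hypothetical, and the surrounding client setup is assumed rather than taken from this diff.

```swift
import SotoDocDB

let docdb = DocDB(client: AWSClient(), region: .useast1)

// Ask DocumentDB to generate the master user password and store it in
// Secrets Manager; MasterUserPassword must be omitted in this case.
let created = try await docdb.createDBCluster(
    DocDB.CreateDBClusterMessage(
        dbClusterIdentifier: "sample-cluster",   // hypothetical identifier
        engine: "docdb",
        manageMasterUserPassword: true,
        masterUsername: "masteruser"             // hypothetical user
    )
)
// The generated secret is reported back on the cluster.
print(created.dbCluster?.masterUserSecret?.secretArn ?? "<no secret>")
```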

" + } } }, "traits": { @@ -2792,6 +2830,12 @@ "traits": { "smithy.api#documentation": "

Storage type associated with your cluster

\n

For information on storage types for Amazon DocumentDB clusters, see \n Cluster storage configurations in the Amazon DocumentDB Developer Guide.

\n

Valid values for storage type - standard | iopt1\n

\n

Default value is standard \n

" } + }, + "MasterUserSecret": { + "target": "com.amazonaws.docdb#ClusterMasterUserSecret", + "traits": { + "smithy.api#documentation": "

The secret managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the master user password.

" + } } }, "traits": { @@ -4791,6 +4835,21 @@ "smithy.api#suppress": [ "WaitableTraitInvalidErrorType" ], + "smithy.test#smokeTests": [ + { + "id": "DescribeDBInstancesFailure", + "params": { + "DBInstanceIdentifier": "fake-id" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ], "smithy.waiters#waitable": { "DBInstanceAvailable": { "acceptors": [ @@ -6597,6 +6656,24 @@ "traits": { "smithy.api#documentation": "

The storage type to associate with the DB cluster.

\n

For information on storage types for Amazon DocumentDB clusters, see \n Cluster storage configurations in the Amazon DocumentDB Developer Guide.

\n

Valid values for storage type - standard | iopt1\n

\n

Default value is standard \n

" } + }, + "ManageMasterUserPassword": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.\n If the cluster doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. \n In this case, you can't specify MasterUserPassword.\n If the cluster already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. \n In this case, Amazon DocumentDB deletes the secret and uses the new password for the master user specified by MasterUserPassword.

" + } + }, + "MasterUserSecretKmsKeyId": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

This setting is valid only if both of the following conditions are met:

  • The cluster doesn't manage the master user password in Amazon Web Services Secrets Manager. If the cluster already manages the master user password in Amazon Web Services Secrets Manager, you can't change the KMS key that is used to encrypt the secret.
  • You are enabling ManageMasterUserPassword to manage the master user password in Amazon Web Services Secrets Manager. If you are turning on ManageMasterUserPassword and don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

" + } + }, + "RotateMasterUserPassword": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password.

\n

This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the cluster. \n The secret value contains the updated password.

\n

Constraint: You must apply the change immediately when rotating the master user password.
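A companion sketch for the rotation path on ModifyDBCluster, honoring the apply-immediately constraint above; the cluster identifier is again hypothetical.

```swift
import SotoDocDB

let docdb = DocDB(client: AWSClient(), region: .useast1)

// Rotation of the managed secret cannot be deferred to the next
// maintenance window, so applyImmediately is set alongside it.
_ = try await docdb.modifyDBCluster(
    DocDB.ModifyDBClusterMessage(
        applyImmediately: true,
        dbClusterIdentifier: "sample-cluster",   // hypothetical identifier
        rotateMasterUserPassword: true
    )
)
```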

" + } } }, "traits": { diff --git a/models/dynamodb.json b/models/dynamodb.json index f5cf40a15a..464a6e071c 100644 --- a/models/dynamodb.json +++ b/models/dynamodb.json @@ -125,7 +125,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents an attribute for describing the schema for the table and\n indexes.

" + "smithy.api#documentation": "

Represents an attribute for describing the schema for the table and indexes.

" } }, "com.amazonaws.dynamodb#AttributeDefinitions": { @@ -1009,7 +1009,7 @@ "Item": { "target": "com.amazonaws.dynamodb#AttributeMap", "traits": { - "smithy.api#documentation": "

The item which caused the condition check to fail. This will be set if ReturnValuesOnConditionCheckFailure is specified as ALL_OLD.

" + "smithy.api#documentation": "

The item which caused the condition check to fail. This will be set if\n ReturnValuesOnConditionCheckFailure is specified as ALL_OLD.

" } } }, @@ -1575,27 +1575,27 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

Name of the table for the check item request. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

Name of the table for the check item request. You can also provide the Amazon Resource Name (ARN) of\n the table in this parameter.

", "smithy.api#required": {} } }, "ConditionExpression": { "target": "com.amazonaws.dynamodb#ConditionExpression", "traits": { - "smithy.api#documentation": "

A condition that must be satisfied in order for a conditional update to\n succeed. For more information, see Condition expressions in the Amazon DynamoDB Developer\n Guide.

", + "smithy.api#documentation": "

A condition that must be satisfied in order for a conditional update to succeed. For\n more information, see Condition expressions in the Amazon DynamoDB Developer\n Guide.

", "smithy.api#required": {} } }, "ExpressionAttributeNames": { "target": "com.amazonaws.dynamodb#ExpressionAttributeNameMap", "traits": { - "smithy.api#documentation": "

One or more substitution tokens for attribute names in an expression. For more information, see\n Expression attribute names \n in the Amazon DynamoDB Developer Guide.

" + "smithy.api#documentation": "

One or more substitution tokens for attribute names in an expression. For more\n information, see Expression attribute names in the Amazon DynamoDB Developer\n Guide.

" } }, "ExpressionAttributeValues": { "target": "com.amazonaws.dynamodb#ExpressionAttributeValueMap", "traits": { - "smithy.api#documentation": "

One or more values that can be substituted in an expression. For more information, see Condition expressions in the Amazon DynamoDB Developer Guide.

" + "smithy.api#documentation": "

One or more values that can be substituted in an expression. For more information, see\n Condition expressions in the Amazon DynamoDB Developer\n Guide.

" } }, "ReturnValuesOnConditionCheckFailure": { @@ -1665,7 +1665,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table that was affected by the operation. If you had specified the Amazon Resource Name (ARN) of a table in the input, you'll see the table ARN in the response.

" + "smithy.api#documentation": "

The name of the table that was affected by the operation. If you had specified the\n Amazon Resource Name (ARN) of a table in the input, you'll see the table ARN in the response.

" } }, "CapacityUnits": { @@ -1971,13 +1971,13 @@ "OnDemandThroughput": { "target": "com.amazonaws.dynamodb#OnDemandThroughput", "traits": { - "smithy.api#documentation": "

The maximum number of read and write units for the global secondary index being created. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" + "smithy.api#documentation": "

The maximum number of read and write units for the global secondary index being\n created. If you use this parameter, you must specify MaxReadRequestUnits,\n MaxWriteRequestUnits, or both.

" } }, "WarmThroughput": { "target": "com.amazonaws.dynamodb#WarmThroughput", "traits": { - "smithy.api#documentation": "

Represents the warm throughput value (in read units per second and write units per second) when creating a secondary index.

" + "smithy.api#documentation": "

Represents the warm throughput value (in read units per second and write units per\n second) when creating a secondary index.

" } } }, @@ -2093,7 +2093,7 @@ "OnDemandThroughputOverride": { "target": "com.amazonaws.dynamodb#OnDemandThroughputOverride", "traits": { - "smithy.api#documentation": "

The maximum on-demand throughput settings for the specified replica table being created. You can only modify MaxReadRequestUnits, because you can't modify MaxWriteRequestUnits for individual replica tables.\n

" + "smithy.api#documentation": "

The maximum on-demand throughput settings for the specified replica table being\n created. You can only modify MaxReadRequestUnits, because you can't modify\n MaxWriteRequestUnits for individual replica tables.

" } }, "GlobalSecondaryIndexes": { @@ -2327,7 +2327,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

Name of the table in which the item to be deleted resides. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

Name of the table in which the item to be deleted resides. You can also provide the\n Amazon Resource Name (ARN) of the table in this parameter.

", "smithy.api#required": {} } }, @@ -2885,7 +2885,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Checks the status of continuous backups and point in time recovery on the specified\n table. Continuous backups are ENABLED on all tables at table creation. If\n point in time recovery is enabled, PointInTimeRecoveryStatus will be set to\n ENABLED.

\n

After continuous backups and point in time recovery are enabled, you can restore to\n any point in time within EarliestRestorableDateTime and\n LatestRestorableDateTime.

\n

\n LatestRestorableDateTime is typically 5 minutes before the current time.\n You can restore your table to any point in time during the last 35 days.

\n

You can call DescribeContinuousBackups at a maximum rate of 10 times per\n second.

" + "smithy.api#documentation": "

Checks the status of continuous backups and point in time recovery on the specified\n table. Continuous backups are ENABLED on all tables at table creation. If\n point in time recovery is enabled, PointInTimeRecoveryStatus will be set to\n ENABLED.

\n

After continuous backups and point in time recovery are enabled, you can restore to\n any point in time within EarliestRestorableDateTime and\n LatestRestorableDateTime.

\n

\n LatestRestorableDateTime is typically 5 minutes before the current time.\n You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days.

\n

You can call DescribeContinuousBackups at a maximum rate of 10 times per\n second.

" } }, "com.amazonaws.dynamodb#DescribeContinuousBackupsInput": { @@ -4043,9 +4043,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", + "name": "sigv4", "signingName": "dynamodb", - "name": "sigv4" + "signingRegion": "us-east-1" } ] }, @@ -6150,7 +6150,7 @@ "ApproximateCreationDateTimePrecision": { "target": "com.amazonaws.dynamodb#ApproximateCreationDateTimePrecision", "traits": { - "smithy.api#documentation": "

Toggle for the precision of Kinesis data stream timestamp. The values are either MILLISECOND or MICROSECOND.

" + "smithy.api#documentation": "

Toggle for the precision of Kinesis data stream timestamp. The values are either\n MILLISECOND or MICROSECOND.

" } } }, @@ -6617,7 +6617,7 @@ "ExportType": { "target": "com.amazonaws.dynamodb#ExportType", "traits": { - "smithy.api#documentation": "

The type of export that was performed. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT.

" + "smithy.api#documentation": "

The type of export that was performed. Valid values are FULL_EXPORT or\n INCREMENTAL_EXPORT.

" } }, "IncrementalExportSpecification": { @@ -6722,7 +6722,7 @@ "ExportType": { "target": "com.amazonaws.dynamodb#ExportType", "traits": { - "smithy.api#documentation": "

The type of export that was performed. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT.

" + "smithy.api#documentation": "

The type of export that was performed. Valid values are FULL_EXPORT or\n INCREMENTAL_EXPORT.

" } } }, @@ -6965,7 +6965,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

The name of the table from which to retrieve the specified item. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

The name of the table from which to retrieve the specified item. You can also provide\n the Amazon Resource Name (ARN) of the table in this parameter.

", "smithy.api#required": {} } }, @@ -7216,13 +7216,13 @@ "OnDemandThroughput": { "target": "com.amazonaws.dynamodb#OnDemandThroughput", "traits": { - "smithy.api#documentation": "

The maximum number of read and write units for the specified global secondary index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" + "smithy.api#documentation": "

The maximum number of read and write units for the specified global secondary index.\n If you use this parameter, you must specify MaxReadRequestUnits,\n MaxWriteRequestUnits, or both.

" } }, "WarmThroughput": { "target": "com.amazonaws.dynamodb#WarmThroughput", "traits": { - "smithy.api#documentation": "

Represents the warm throughput value (in read units per second and write units per second) for the specified secondary index. If you use this parameter, you must specify ReadUnitsPerSecond, WriteUnitsPerSecond, or both.

" + "smithy.api#documentation": "

Represents the warm throughput value (in read units per second and write units per\n second) for the specified secondary index. If you use this parameter, you must specify\n ReadUnitsPerSecond, WriteUnitsPerSecond, or both.

" } } }, @@ -7318,13 +7318,13 @@ "OnDemandThroughput": { "target": "com.amazonaws.dynamodb#OnDemandThroughput", "traits": { - "smithy.api#documentation": "

The maximum number of read and write units for the specified global secondary index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" + "smithy.api#documentation": "

The maximum number of read and write units for the specified global secondary index.\n If you use this parameter, you must specify MaxReadRequestUnits,\n MaxWriteRequestUnits, or both.

" } }, "WarmThroughput": { "target": "com.amazonaws.dynamodb#GlobalSecondaryIndexWarmThroughputDescription", "traits": { - "smithy.api#documentation": "

Represents the warm throughput value (in read units per second and write units per second) for the specified secondary index.

" + "smithy.api#documentation": "

Represents the warm throughput value (in read units per second and write units per\n second) for the specified secondary index.

" } } }, @@ -7417,19 +7417,19 @@ "ReadUnitsPerSecond": { "target": "com.amazonaws.dynamodb#PositiveLongObject", "traits": { - "smithy.api#documentation": "

Represents warm throughput read units per second value for a global secondary index.

" + "smithy.api#documentation": "

Represents warm throughput read units per second value for a global secondary\n index.

" } }, "WriteUnitsPerSecond": { "target": "com.amazonaws.dynamodb#PositiveLongObject", "traits": { - "smithy.api#documentation": "

Represents warm throughput write units per second value for a global secondary index.

" + "smithy.api#documentation": "

Represents warm throughput write units per second value for a global secondary\n index.

" } }, "Status": { "target": "com.amazonaws.dynamodb#IndexStatus", "traits": { - "smithy.api#documentation": "

Represents the warm throughput status being created or updated on a global secondary index. The status can only be UPDATING or ACTIVE.

" + "smithy.api#documentation": "

Represents the warm throughput status being created or updated on a global secondary\n index. The status can only be UPDATING or ACTIVE.

" } } }, @@ -7986,19 +7986,19 @@ "ExportFromTime": { "target": "com.amazonaws.dynamodb#ExportFromTime", "traits": { - "smithy.api#documentation": "

Time in the past which provides the inclusive start range for the export table's data, counted in seconds from the start of the Unix epoch. The incremental export will reflect the table's state including and after this point in time.

" + "smithy.api#documentation": "

Time in the past which provides the inclusive start range for the export table's data,\n counted in seconds from the start of the Unix epoch. The incremental export will reflect\n the table's state including and after this point in time.

" } }, "ExportToTime": { "target": "com.amazonaws.dynamodb#ExportToTime", "traits": { - "smithy.api#documentation": "

Time in the past which provides the exclusive end range for the export table's data, counted in seconds from the start of the Unix epoch. The incremental export will reflect the table's state just prior to this point in time. If this is not provided, the latest time with data available will be used.

" + "smithy.api#documentation": "

Time in the past which provides the exclusive end range for the export table's data,\n counted in seconds from the start of the Unix epoch. The incremental export will reflect\n the table's state just prior to this point in time. If this is not provided, the latest\n time with data available will be used.

" } }, "ExportViewType": { "target": "com.amazonaws.dynamodb#ExportViewType", "traits": { - "smithy.api#documentation": "

The view type that was chosen for the export. Valid values are NEW_AND_OLD_IMAGES and NEW_IMAGES. The default value is NEW_AND_OLD_IMAGES.

" + "smithy.api#documentation": "

The view type that was chosen for the export. Valid values are\n NEW_AND_OLD_IMAGES and NEW_IMAGES. The default value is\n NEW_AND_OLD_IMAGES.

" } } }, @@ -8447,7 +8447,7 @@ "ApproximateCreationDateTimePrecision": { "target": "com.amazonaws.dynamodb#ApproximateCreationDateTimePrecision", "traits": { - "smithy.api#documentation": "

The precision of the Kinesis data stream timestamp. The values are either MILLISECOND or MICROSECOND.

" + "smithy.api#documentation": "

The precision of the Kinesis data stream timestamp. The values are either\n MILLISECOND or MICROSECOND.

" } } }, @@ -9311,18 +9311,18 @@ "MaxReadRequestUnits": { "target": "com.amazonaws.dynamodb#LongObject", "traits": { - "smithy.api#documentation": "

Maximum number of read request units for the specified table.

\n

To specify a maximum OnDemandThroughput on your table, set the value of MaxReadRequestUnits as greater than or equal to 1. To remove the maximum OnDemandThroughput that is currently set on your table, set the value of MaxReadRequestUnits to -1.

" + "smithy.api#documentation": "

Maximum number of read request units for the specified table.

\n

To specify a maximum OnDemandThroughput on your table, set the value of\n MaxReadRequestUnits as greater than or equal to 1. To remove the\n maximum OnDemandThroughput that is currently set on your table, set the\n value of MaxReadRequestUnits to -1.

" } }, "MaxWriteRequestUnits": { "target": "com.amazonaws.dynamodb#LongObject", "traits": { - "smithy.api#documentation": "

Maximum number of write request units for the specified table.

\n

To specify a maximum OnDemandThroughput on your table, set the value of MaxWriteRequestUnits as greater than or equal to 1. To remove the maximum OnDemandThroughput that is currently set on your table, set the value of MaxWriteRequestUnits to -1.

" + "smithy.api#documentation": "

Maximum number of write request units for the specified table.

\n

To specify a maximum OnDemandThroughput on your table, set the value of\n MaxWriteRequestUnits as greater than or equal to 1. To remove the\n maximum OnDemandThroughput that is currently set on your table, set the\n value of MaxWriteRequestUnits to -1.

" } } }, "traits": { - "smithy.api#documentation": "

Sets the maximum number of read and write units for the specified on-demand table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" + "smithy.api#documentation": "

Sets the maximum number of read and write units for the specified on-demand table. If\n you use this parameter, you must specify MaxReadRequestUnits,\n MaxWriteRequestUnits, or both.
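A sketch of the -1 convention with the generated DynamoDB shapes; the table name is hypothetical, and the value simply rides along on UpdateTable.

```swift
import SotoDynamoDB

let dynamodb = DynamoDB(client: AWSClient(), region: .useast1)

// Cap on-demand reads at 1,000 units while removing a previously
// configured write cap (-1 clears an existing maximum).
_ = try await dynamodb.updateTable(
    DynamoDB.UpdateTableInput(
        onDemandThroughput: .init(
            maxReadRequestUnits: 1000,
            maxWriteRequestUnits: -1
        ),
        tableName: "SampleTable"   // hypothetical table
    )
)
```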

" } }, "com.amazonaws.dynamodb#OnDemandThroughputOverride": { @@ -9336,7 +9336,7 @@ } }, "traits": { - "smithy.api#documentation": "

Overrides the on-demand throughput settings for this replica table. If you don't specify a value for this parameter, it uses the source table's on-demand throughput settings.

" + "smithy.api#documentation": "

Overrides the on-demand throughput settings for this replica table. If you don't\n specify a value for this parameter, it uses the source table's on-demand throughput\n settings.

" } }, "com.amazonaws.dynamodb#ParameterizedStatement": { @@ -9358,7 +9358,7 @@ "ReturnValuesOnConditionCheckFailure": { "target": "com.amazonaws.dynamodb#ReturnValuesOnConditionCheckFailure", "traits": { - "smithy.api#documentation": "

An optional parameter that returns the item attributes for a PartiQL\n ParameterizedStatement operation that failed a condition check.

\n

There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.

" + "smithy.api#documentation": "

An optional parameter that returns the item attributes for a PartiQL\n ParameterizedStatement operation that failed a condition check.

\n

There is no additional cost associated with requesting a return value aside from the\n small network and processing overhead of receiving a larger response. No read capacity\n units are consumed.

" } } }, @@ -9423,6 +9423,12 @@ "smithy.api#documentation": "

The current state of point in time recovery:

  • ENABLED - Point in time recovery is enabled.
  • DISABLED - Point in time recovery is disabled.

" } }, + "RecoveryPeriodInDays": { + "target": "com.amazonaws.dynamodb#RecoveryPeriodInDays", + "traits": { + "smithy.api#documentation": "

The number of preceding days for which continuous backups are taken and maintained.\n Your table data is only recoverable to any point-in-time from within the configured\n recovery period. This parameter is optional. If no value is provided, the value will\n default to 35.

" + } + }, "EarliestRestorableDateTime": { "target": "com.amazonaws.dynamodb#Date", "traits": { @@ -9449,6 +9455,12 @@ "smithy.api#documentation": "

Indicates whether point in time recovery is enabled (true) or disabled (false) on the\n table.

", "smithy.api#required": {} } + }, + "RecoveryPeriodInDays": { + "target": "com.amazonaws.dynamodb#RecoveryPeriodInDays", + "traits": { + "smithy.api#documentation": "

The number of preceding days for which continuous backups are taken and maintained.\n Your table data is only recoverable to any point-in-time from within the configured\n recovery period. This parameter is optional. If no value is provided, the value will\n default to 35.
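A minimal sketch of setting the new recovery period through this SDK; the table name is hypothetical, and omitting recoveryPeriodInDays keeps the 35-day default.

```swift
import SotoDynamoDB

let dynamodb = DynamoDB(client: AWSClient(), region: .useast1)

// Enable point in time recovery but keep only 7 days of recoverable
// history instead of the default 35 (valid range is 1-35).
_ = try await dynamodb.updateContinuousBackups(
    DynamoDB.UpdateContinuousBackupsInput(
        pointInTimeRecoverySpecification: .init(
            pointInTimeRecoveryEnabled: true,
            recoveryPeriodInDays: 7
        ),
        tableName: "SampleTable"   // hypothetical table
    )
)
```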

" + } } }, "traits": { @@ -9593,14 +9605,14 @@ "ReadCapacityUnits": { "target": "com.amazonaws.dynamodb#PositiveLongObject", "traits": { - "smithy.api#documentation": "

The maximum number of strongly consistent reads consumed per second before DynamoDB\n returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB\n Developer Guide.

\n

If read/write capacity mode is PAY_PER_REQUEST the value is set to\n 0.

", + "smithy.api#documentation": "

The maximum number of strongly consistent reads consumed per second before DynamoDB\n returns a ThrottlingException. For more information, see Specifying\n Read and Write Requirements in the Amazon DynamoDB Developer\n Guide.

\n

If read/write capacity mode is PAY_PER_REQUEST the value is set to\n 0.

", "smithy.api#required": {} } }, "WriteCapacityUnits": { "target": "com.amazonaws.dynamodb#PositiveLongObject", "traits": { - "smithy.api#documentation": "

The maximum number of writes consumed per second before DynamoDB returns a\n ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB\n Developer Guide.

\n

If read/write capacity mode is PAY_PER_REQUEST the value is set to\n 0.

", + "smithy.api#documentation": "

The maximum number of writes consumed per second before DynamoDB returns a\n ThrottlingException. For more information, see Specifying\n Read and Write Requirements in the Amazon DynamoDB Developer\n Guide.

\n

If read/write capacity mode is PAY_PER_REQUEST the value is set to\n 0.

", "smithy.api#required": {} } } @@ -9689,7 +9701,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

Name of the table in which to write the item. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

Name of the table in which to write the item. You can also provide the Amazon Resource Name (ARN) of\n the table in this parameter.

", "smithy.api#required": {} } }, @@ -10222,6 +10234,15 @@ "smithy.api#output": {} } }, + "com.amazonaws.dynamodb#RecoveryPeriodInDays": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 35 + } + } + }, "com.amazonaws.dynamodb#RegionName": { "type": "string" }, @@ -10366,7 +10387,7 @@ "OnDemandThroughputOverride": { "target": "com.amazonaws.dynamodb#OnDemandThroughputOverride", "traits": { - "smithy.api#documentation": "

Overrides the maximum on-demand throughput settings for the specified replica table.

" + "smithy.api#documentation": "

Overrides the maximum on-demand throughput settings for the specified replica\n table.

" } }, "WarmThroughput": { @@ -10420,7 +10441,7 @@ "OnDemandThroughputOverride": { "target": "com.amazonaws.dynamodb#OnDemandThroughputOverride", "traits": { - "smithy.api#documentation": "

Overrides the maximum on-demand throughput settings for the specified global secondary index in the specified replica table.

" + "smithy.api#documentation": "

Overrides the maximum on-demand throughput settings for the specified global secondary\n index in the specified replica table.

" } } }, @@ -10501,7 +10522,7 @@ "OnDemandThroughputOverride": { "target": "com.amazonaws.dynamodb#OnDemandThroughputOverride", "traits": { - "smithy.api#documentation": "

Overrides the maximum on-demand throughput for the specified global secondary index in the specified replica table.

" + "smithy.api#documentation": "

Overrides the maximum on-demand throughput for the specified global secondary index in\n the specified replica table.

" } }, "WarmThroughput": { @@ -11121,7 +11142,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored table using point in time recovery:

  • Global secondary indexes (GSIs)
  • Local secondary indexes (LSIs)
  • Provisioned read and write capacity
  • Encryption settings

    All these settings come from the current settings of the source table at the time of restore.

You must manually set up the following on the restored table:

  • Auto scaling policies
  • IAM policies
  • Amazon CloudWatch metrics and alarms
  • Tags
  • Stream settings
  • Time to Live (TTL) settings
  • Point in time recovery settings

" + "smithy.api#documentation": "

Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored table using point in time recovery:

  • Global secondary indexes (GSIs)
  • Local secondary indexes (LSIs)
  • Provisioned read and write capacity
  • Encryption settings

    All these settings come from the current settings of the source table at the time of restore.

You must manually set up the following on the restored table:

  • Auto scaling policies
  • IAM policies
  • Amazon CloudWatch metrics and alarms
  • Tags
  • Stream settings
  • Time to Live (TTL) settings
  • Point in time recovery settings

" } }, "com.amazonaws.dynamodb#RestoreTableToPointInTimeInput": { @@ -12260,13 +12281,13 @@ "DeletionProtectionEnabled": { "target": "com.amazonaws.dynamodb#DeletionProtectionEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether deletion protection is enabled (true) or disabled (false) on the table.

" + "smithy.api#documentation": "

Indicates whether deletion protection is enabled (true) or disabled (false) on the\n table.

" } }, "OnDemandThroughput": { "target": "com.amazonaws.dynamodb#OnDemandThroughput", "traits": { - "smithy.api#documentation": "

The maximum number of read and write units for the specified on-demand table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" + "smithy.api#documentation": "

The maximum number of read and write units for the specified on-demand table. If you\n use this parameter, you must specify MaxReadRequestUnits,\n MaxWriteRequestUnits, or both.

" } }, "WarmThroughput": { @@ -12402,7 +12423,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents the warm throughput value (in read units per second and write units per second) of the base table.

" + "smithy.api#documentation": "

Represents the warm throughput value (in read units per second and write units per\n second) of the base table.

" } }, "com.amazonaws.dynamodb#Tag": { @@ -12955,7 +12976,7 @@ "TableName": { "target": "com.amazonaws.dynamodb#TableArn", "traits": { - "smithy.api#documentation": "

Name of the table for the UpdateItem request. You can also provide the Amazon Resource Name (ARN) of the table in this parameter.

", + "smithy.api#documentation": "

Name of the table for the UpdateItem request. You can also provide the\n Amazon Resource Name (ARN) of the table in this parameter.

", "smithy.api#required": {} } }, @@ -13014,7 +13035,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

\n UpdateContinuousBackups enables or disables point in time recovery for\n the specified table. A successful UpdateContinuousBackups call returns the\n current ContinuousBackupsDescription. Continuous backups are\n ENABLED on all tables at table creation. If point in time recovery is\n enabled, PointInTimeRecoveryStatus will be set to ENABLED.

\n

Once continuous backups and point in time recovery are enabled, you can restore to\n any point in time within EarliestRestorableDateTime and\n LatestRestorableDateTime.

\n

\n LatestRestorableDateTime is typically 5 minutes before the current time.\n You can restore your table to any point in time during the last 35 days.

" + "smithy.api#documentation": "

\n UpdateContinuousBackups enables or disables point in time recovery for\n the specified table. A successful UpdateContinuousBackups call returns the\n current ContinuousBackupsDescription. Continuous backups are\n ENABLED on all tables at table creation. If point in time recovery is\n enabled, PointInTimeRecoveryStatus will be set to ENABLED.

\n

Once continuous backups and point in time recovery are enabled, you can restore to\n any point in time within EarliestRestorableDateTime and\n LatestRestorableDateTime.

\n

\n LatestRestorableDateTime is typically 5 minutes before the current time.\n You can restore your table to any point in time in the last 35 days. You can set the recovery period to any value between 1 and 35 days.

" } }, "com.amazonaws.dynamodb#UpdateContinuousBackupsInput": { @@ -13149,13 +13170,13 @@ "OnDemandThroughput": { "target": "com.amazonaws.dynamodb#OnDemandThroughput", "traits": { - "smithy.api#documentation": "

Updates the maximum number of read and write units for the specified global secondary index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" + "smithy.api#documentation": "

Updates the maximum number of read and write units for the specified global secondary\n index. If you use this parameter, you must specify MaxReadRequestUnits,\n MaxWriteRequestUnits, or both.

" } }, "WarmThroughput": { "target": "com.amazonaws.dynamodb#WarmThroughput", "traits": { - "smithy.api#documentation": "

Represents the warm throughput value of the new provisioned throughput settings to be applied to a global secondary index.

" + "smithy.api#documentation": "

Represents the warm throughput value of the new provisioned throughput settings to be\n applied to a global secondary index.

" } } }, @@ -13972,18 +13993,18 @@ "ReadUnitsPerSecond": { "target": "com.amazonaws.dynamodb#LongObject", "traits": { - "smithy.api#documentation": "

Represents the number of read operations your base table can instantaneously support.

" + "smithy.api#documentation": "

Represents the number of read operations your base table can instantaneously\n support.

" } }, "WriteUnitsPerSecond": { "target": "com.amazonaws.dynamodb#LongObject", "traits": { - "smithy.api#documentation": "

Represents the number of write operations your base table can instantaneously support.

" + "smithy.api#documentation": "

Represents the number of write operations your base table can instantaneously\n support.

" } } }, "traits": { - "smithy.api#documentation": "

Provides visibility into the number of read and write operations your table or secondary index can instantaneously support. The settings can be modified using the UpdateTable operation to meet the throughput requirements of an upcoming peak event.

" + "smithy.api#documentation": "

Provides visibility into the number of read and write operations your table or\n secondary index can instantaneously support. The settings can be modified using the\n UpdateTable operation to meet the throughput requirements of an\n upcoming peak event.
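A sketch of pre-warming a table ahead of a peak event with the generated shapes; the numbers and table name are illustrative only.

```swift
import SotoDynamoDB

let dynamodb = DynamoDB(client: AWSClient(), region: .useast1)

// Raise the instantaneous read/write floor before an expected spike.
_ = try await dynamodb.updateTable(
    DynamoDB.UpdateTableInput(
        tableName: "SampleTable",   // hypothetical table
        warmThroughput: .init(
            readUnitsPerSecond: 20000,
            writeUnitsPerSecond: 10000
        )
    )
)
```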

" } }, "com.amazonaws.dynamodb#WriteRequest": { diff --git a/models/ec2.json b/models/ec2.json index 336ecc3308..ec7e1cf141 100644 --- a/models/ec2.json +++ b/models/ec2.json @@ -2088,6 +2088,12 @@ "traits": { "smithy.api#enumValue": "used" } + }, + "future": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "future" + } } } }, @@ -13506,6 +13512,14 @@ "smithy.api#documentation": "

Options for enabling a customizable text banner that will be displayed on Amazon Web Services provided clients when a VPN session is\n\t\t\testablished.

", "smithy.api#xmlName": "clientLoginBannerOptions" } + }, + "DisconnectOnSessionTimeout": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DisconnectOnSessionTimeout", + "smithy.api#documentation": "

Indicates whether the client VPN session is disconnected after the maximum sessionTimeoutHours is reached. If true, users are prompted to reconnect client VPN. If false, client VPN attempts to reconnect automatically. The default value is false.

", + "smithy.api#xmlName": "disconnectOnSessionTimeout" + } } }, "traits": { @@ -15635,6 +15649,12 @@ "traits": { "smithy.api#documentation": "

Options for enabling a customizable text banner that will be displayed on\n\t\t\tAmazon Web Services provided clients when a VPN session is established.

" } + }, + "DisconnectOnSessionTimeout": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether the client VPN session is disconnected after the maximum timeout specified in SessionTimeoutHours is reached. If true, users are prompted to reconnect client VPN. If false, client VPN attempts to reconnect automatically. \n The default value is false.

" + } } }, "traits": { @@ -70386,6 +70406,54 @@ "traits": { "smithy.api#enumValue": "i8g.metal-24xl" } + }, + "u7i_6tb_112xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "u7i-6tb.112xlarge" + } + }, + "u7i_8tb_112xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "u7i-8tb.112xlarge" + } + }, + "u7inh_32tb_480xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "u7inh-32tb.480xlarge" + } + }, + "p5e_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "p5e.48xlarge" + } + }, + "p5en_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "p5en.48xlarge" + } + }, + "f2_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "f2.12xlarge" + } + }, + "f2_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "f2.48xlarge" + } + }, + "trn2_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "trn2.48xlarge" + } } } }, @@ -79214,6 +79282,12 @@ "traits": { "smithy.api#documentation": "

Options for enabling a customizable text banner that will be displayed on\n\t\t\tAmazon Web Services provided clients when a VPN session is established.

" } + }, + "DisconnectOnSessionTimeout": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether the client VPN session is disconnected after the maximum timeout specified in sessionTimeoutHours is reached. If true, users are prompted to reconnect client VPN. If false, client VPN attempts to reconnect automatically. The default value is false.
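A sketch of flipping the new flag on an existing endpoint via ModifyClientVpnEndpoint; the endpoint ID is hypothetical.

```swift
import SotoEC2

let ec2 = EC2(client: AWSClient(), region: .useast1)

// Disconnect rather than auto-reconnect once sessionTimeoutHours
// elapses; users must deliberately start a new session.
_ = try await ec2.modifyClientVpnEndpoint(
    EC2.ModifyClientVpnEndpointRequest(
        clientVpnEndpointId: "cvpn-endpoint-0123456789abcdef0",   // hypothetical ID
        disconnectOnSessionTimeout: true,
        sessionTimeoutHours: 12
    )
)
```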

" + } } }, "traits": { diff --git a/models/ecr-public.json b/models/ecr-public.json index c0d80f9bc1..1144dedbd1 100644 --- a/models/ecr-public.json +++ b/models/ecr-public.json @@ -2901,7 +2901,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2944,7 +2943,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -2957,7 +2957,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2971,7 +2970,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2994,7 +2992,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -3029,7 +3026,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -3040,14 +3036,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -3061,14 +3059,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -3077,11 +3073,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -3092,14 +3088,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -3113,7 +3111,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -3133,7 +3130,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -3144,14 +3140,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -3162,9 +3160,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/models/ecr.json b/models/ecr.json index fe5e547ed2..d0ae29a1b6 100644 --- a/models/ecr.json +++ b/models/ecr.json @@ -3499,7 +3499,7 @@ "encryptionType": { "target": "com.amazonaws.ecr#EncryptionType", "traits": { - "smithy.api#documentation": "

The encryption type to use.

\n

If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created.

\n

If you use the KMS_DSSE encryption type, the contents of the repository\n will be encrypted with two layers of encryption using server-side encryption with the\n KMS Management Service key stored in KMS. Similar to the KMS encryption type, you\n can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS\n key, which you've already created.

\n

If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES256 encryption algorithm.

\n

For more information, see Amazon ECR encryption at\n rest in the Amazon Elastic Container Registry User Guide.

", + "smithy.api#documentation": "

The encryption type to use.

\n

If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created.

\n

If you use the KMS_DSSE encryption type, the contents of the repository\n will be encrypted with two layers of encryption using server-side encryption with the\n Key Management Service key stored in KMS. Similar to the KMS encryption\n type, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your\n own KMS key, which you've already created.

\n

If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES256 encryption algorithm.

\n

For more information, see Amazon ECR encryption at\n rest in the Amazon Elastic Container Registry User Guide.

", "smithy.api#required": {} } }, @@ -3784,7 +3784,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the basic scan type version name.

" + "smithy.api#documentation": "

Retrieves the account setting value for the specified setting name.

" } }, "com.amazonaws.ecr#GetAccountSettingRequest": { @@ -3793,7 +3793,7 @@ "name": { "target": "com.amazonaws.ecr#AccountSettingName", "traits": { - "smithy.api#documentation": "

Basic scan type version name.

", + "smithy.api#documentation": "

The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or\n REGISTRY_POLICY_SCOPE.

", "smithy.api#required": {} } } @@ -3808,13 +3808,13 @@ "name": { "target": "com.amazonaws.ecr#AccountSettingName", "traits": { - "smithy.api#documentation": "

Retrieves the basic scan type version name.

" + "smithy.api#documentation": "

Retrieves the name of the account setting.

" } }, "value": { "target": "com.amazonaws.ecr#AccountSettingName", "traits": { - "smithy.api#documentation": "

Retrieves the value that specifies what basic scan type is being used:\n AWS_NATIVE or CLAIR.

" + "smithy.api#documentation": "

The setting value for the setting name. The following are valid values for the basic scan\n type being used: AWS_NATIVE or CLAIR. The following are valid\n values for the registry policy scope being used: V1 or\n V2.

" } } }, @@ -5995,7 +5995,7 @@ } ], "traits": { - "smithy.api#documentation": "

Allows you to change the basic scan type version by setting the name\n parameter to either CLAIR to AWS_NATIVE.

" + "smithy.api#documentation": "

Allows you to change the basic scan type version or registry policy scope.

" } }, "com.amazonaws.ecr#PutAccountSettingRequest": { @@ -6004,14 +6004,14 @@ "name": { "target": "com.amazonaws.ecr#AccountSettingName", "traits": { - "smithy.api#documentation": "

Basic scan type version name.

", + "smithy.api#documentation": "

The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or\n REGISTRY_POLICY_SCOPE.

", "smithy.api#required": {} } }, "value": { "target": "com.amazonaws.ecr#AccountSettingValue", "traits": { - "smithy.api#documentation": "

Setting value that determines what basic scan type is being used:\n AWS_NATIVE or CLAIR.

", + "smithy.api#documentation": "

Setting value that is specified. The following are valid values for the basic scan\n type being used: AWS_NATIVE or CLAIR. The following are valid\n values for the registry policy scope being used: V1 or\n V2.
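A sketch of the broadened setting through PutAccountSetting, using the names and values from the documentation above; the client setup is assumed.

```swift
import SotoECR

let ecr = ECR(client: AWSClient(), region: .useast1)

// Opt the registry into the V2 policy scope; the response echoes
// the setting name and the value that was applied.
let setting = try await ecr.putAccountSetting(
    ECR.PutAccountSettingRequest(name: "REGISTRY_POLICY_SCOPE", value: "V2")
)
print(setting.name ?? "", setting.value ?? "")
```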

", "smithy.api#required": {} } } @@ -6026,13 +6026,13 @@ "name": { "target": "com.amazonaws.ecr#AccountSettingName", "traits": { - "smithy.api#documentation": "

Retrieves the the basic scan type version name.

" + "smithy.api#documentation": "

Retrieves the name of the account setting.

" } }, "value": { "target": "com.amazonaws.ecr#AccountSettingValue", "traits": { - "smithy.api#documentation": "

Retrieves the basic scan type value, either AWS_NATIVE or\n -.

" + "smithy.api#documentation": "

Retrieves the value of the specified account setting.

" } } }, diff --git a/models/ecs.json b/models/ecs.json index a18d106fa8..45d15364d6 100644 --- a/models/ecs.json +++ b/models/ecs.json @@ -1526,14 +1526,14 @@ "subnets": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The IDs of the subnets associated with the task or service. There's a limit of 16\n\t\t\tsubnets that can be specified per awsvpcConfiguration.

\n \n

All specified subnets must be from the same VPC.

\n
", + "smithy.api#documentation": "

The IDs of the subnets associated with the task or service. There's a limit of 16\n\t\t\tsubnets that can be specified.

\n \n

All specified subnets must be from the same VPC.

\n
", "smithy.api#required": {} } }, "securityGroups": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The IDs of the security groups associated with the task or service. If you don't\n\t\t\tspecify a security group, the default security group for the VPC is used. There's a\n\t\t\tlimit of 5 security groups that can be specified per\n\t\t\tawsvpcConfiguration.

\n \n

All specified security groups must be from the same VPC.

\n
" + "smithy.api#documentation": "

The IDs of the security groups associated with the task or service. If you don't\n\t\t\tspecify a security group, the default security group for the VPC is used. There's a\n\t\t\tlimit of 5 security groups that can be specified.

\n \n

All specified security groups must be from the same VPC.

\n
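A sketch of an awsvpc configuration that stays inside those limits, with IDs borrowed from the task-set example later in this diff.

```swift
import SotoECS

// At most 16 subnets and 5 security groups, all from the same VPC.
let networkConfiguration = ECS.NetworkConfiguration(
    awsvpcConfiguration: ECS.AwsVpcConfiguration(
        assignPublicIp: .disabled,
        securityGroups: ["sg-12344321"],
        subnets: ["subnet-12344321"]
    )
)
```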
" } }, "assignPublicIp": { @@ -2402,7 +2402,7 @@ "linuxParameters": { "target": "com.amazonaws.ecs#LinuxParameters", "traits": { - "smithy.api#documentation": "

Linux-specific modifications that are applied to the container, such as Linux kernel\n\t\t\tcapabilities. For more information see KernelCapabilities.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

Linux-specific modifications that are applied to the default Docker container configuration, such as Linux kernel\n\t\t\tcapabilities. For more information see KernelCapabilities.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "secrets": { @@ -3007,7 +3007,43 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new capacity provider. Capacity providers are associated with an Amazon ECS\n\t\t\tcluster and are used in capacity provider strategies to facilitate cluster auto\n\t\t\tscaling.

\n

Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on\n\t\t\tFargate use the FARGATE and FARGATE_SPOT capacity providers.\n\t\t\tThese providers are available to all accounts in the Amazon Web Services Regions that Fargate\n\t\t\tsupports.

" + "smithy.api#documentation": "

Creates a new capacity provider. Capacity providers are associated with an Amazon ECS\n\t\t\tcluster and are used in capacity provider strategies to facilitate cluster auto\n\t\t\tscaling.

\n

Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on\n\t\t\tFargate use the FARGATE and FARGATE_SPOT capacity providers.\n\t\t\tThese providers are available to all accounts in the Amazon Web Services Regions that Fargate\n\t\t\tsupports.

", + "smithy.api#examples": [ + { + "title": "To create a capacity provider ", + "documentation": "This example creates a capacity provider that uses the specified Auto Scaling group MyASG and has managed scaling and managed termination protection enabled. ", + "input": { + "name": "MyCapacityProvider", + "autoScalingGroupProvider": { + "autoScalingGroupArn": "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:57ffcb94-11f0-4d6d-bf60-3bac5EXAMPLE:autoScalingGroupName/MyASG", + "managedScaling": { + "status": "ENABLED", + "targetCapacity": 100 + }, + "managedTerminationProtection": "ENABLED" + } + }, + "output": { + "capacityProvider": { + "capacityProviderArn": "arn:aws:ecs:us-east-1:123456789012:capacity-provider/MyCapacityProvider", + "name": "MyCapacityProvider", + "status": "ACTIVE", + "autoScalingGroupProvider": { + "autoScalingGroupArn": "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:57ffcb94-11f0-4d6d-bf60-3bac5EXAMPLE:autoScalingGroupName/MyASG", + "managedScaling": { + "status": "ENABLED", + "targetCapacity": 100, + "minimumScalingStepSize": 1, + "maximumScalingStepSize": 10000, + "instanceWarmupPeriod": 300 + }, + "managedTerminationProtection": "ENABLED" + }, + "tags": [] + } + } + } + ] } }, "com.amazonaws.ecs#CreateCapacityProviderRequest": { @@ -3535,7 +3571,61 @@ } ], "traits": { - "smithy.api#documentation": "

Create a task set in the specified cluster and service. This is used when a service\n\t\t\tuses the EXTERNAL deployment controller type. For more information, see\n\t\t\t\tAmazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.\n         On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.\n         For information about the maximum number of task sets and other quotas, see Amazon ECS\n\t\t\t\tservice quotas in the Amazon Elastic Container Service Developer Guide."
+      "smithy.api#documentation": "Create a task set in the specified cluster and service. This is used when a service\n\t\t\tuses the EXTERNAL deployment controller type. For more information, see\n\t\t\t\tAmazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.\n         On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.\n         For information about the maximum number of task sets and other quotas, see Amazon ECS\n\t\t\t\tservice quotas in the Amazon Elastic Container Service Developer Guide.
", + "smithy.api#examples": [ + { + "title": "To create a task set", + "documentation": "This example creates a task set in a service that uses the EXTERNAL deployment controller.", + "input": { + "service": "MyService", + "cluster": "MyCluster", + "taskDefinition": "MyTaskDefinition:2", + "networkConfiguration": { + "awsvpcConfiguration": { + "subnets": [ + "subnet-12344321" + ], + "securityGroups": [ + "sg-12344321" + ] + } + } + }, + "output": { + "taskSet": { + "id": "ecs-svc/1234567890123456789", + "taskSetArn": "arn:aws:ecs:us-west-2:123456789012:task-set/MyCluster/MyService/ecs-svc/1234567890123456789", + "status": "ACTIVE", + "taskDefinition": "arn:aws:ecs:us-west-2:123456789012:task-definition/MyTaskDefinition:2", + "computedDesiredCount": 0, + "pendingCount": 0, + "runningCount": 0, + "createdAt": 1.557128360711E9, + "updatedAt": 1.557128360711E9, + "launchType": "EC2", + "networkConfiguration": { + "awsvpcConfiguration": { + "subnets": [ + "subnet-12344321" + ], + "securityGroups": [ + "sg-12344321" + ], + "assignPublicIp": "DISABLED" + } + }, + "loadBalancers": [], + "serviceRegistries": [], + "scale": { + "value": 0, + "unit": "PERCENT" + }, + "stabilityStatus": "STABILIZING", + "stabilityStatusAt": 1.557128360711E9 + } + } + } + ] } }, "com.amazonaws.ecs#CreateTaskSetRequest": { @@ -3770,7 +3860,30 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes one or more custom attributes from an Amazon ECS resource."
+      "smithy.api#documentation": "Deletes one or more custom attributes from an Amazon ECS resource.",
", + "smithy.api#examples": [ + { + "title": "To delete a custom attribute from an Amazon ECS instance", + "documentation": "This example deletes an attribute named stack from a container instance. ", + "input": { + "attributes": [ + { + "name": "stack", + "targetId": "aws:ecs:us-west-2:130757420319:container-instance/1c3be8ed-df30-47b4-8f1e-6e68ebd01f34" + } + ] + }, + "output": { + "attributes": [ + { + "name": "stack", + "targetId": "aws:ecs:us-west-2:130757420319:container-instance/1c3be8ed-df30-47b4-8f1e-6e68ebd01f34", + "value": "production" + } + ] + } + } + ] } }, "com.amazonaws.ecs#DeleteAttributesRequest": { @@ -3828,7 +3941,35 @@ } ], "traits": { - "smithy.api#documentation": "

-      "smithy.api#documentation": "Deletes the specified capacity provider.\n         The FARGATE and FARGATE_SPOT capacity providers are\n\t\t\t\treserved and can't be deleted. You can disassociate them from a cluster using either\n\t\t\t\t\tPutClusterCapacityProviders or by deleting the cluster.\n         Prior to a capacity provider being deleted, the capacity provider must be removed from\n\t\t\tthe capacity provider strategy from all services. The UpdateService API\n\t\t\tcan be used to remove a capacity provider from a service's capacity provider strategy.\n\t\t\tWhen updating a service, the forceNewDeployment option can be used to\n\t\t\tensure that any tasks using the Amazon EC2 instance capacity provided by the capacity\n\t\t\tprovider are transitioned to use the capacity from the remaining capacity providers.\n\t\t\tOnly capacity providers that aren't associated with a cluster can be deleted. To remove\n\t\t\ta capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster."
+      "smithy.api#documentation": "Deletes the specified capacity provider.\n         The FARGATE and FARGATE_SPOT capacity providers are\n\t\t\t\treserved and can't be deleted. You can disassociate them from a cluster using either\n\t\t\t\t\tPutClusterCapacityProviders or by deleting the cluster.\n         Prior to a capacity provider being deleted, the capacity provider must be removed from\n\t\t\tthe capacity provider strategy from all services. The UpdateService API\n\t\t\tcan be used to remove a capacity provider from a service's capacity provider strategy.\n\t\t\tWhen updating a service, the forceNewDeployment option can be used to\n\t\t\tensure that any tasks using the Amazon EC2 instance capacity provided by the capacity\n\t\t\tprovider are transitioned to use the capacity from the remaining capacity providers.\n\t\t\tOnly capacity providers that aren't associated with a cluster can be deleted. To remove\n\t\t\ta capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.
", + "smithy.api#examples": [ + { + "title": "To delete a specified capacity provider", + "documentation": "This example deletes a specified capacity provider. ", + "input": { + "capacityProvider": "arn:aws:ecs:us-west-2:123456789012:capacity-provider/ExampleCapacityProvider" + }, + "output": { + "capacityProvider": { + "capacityProviderArn": "arn:aws:ecs:us-west-2:123456789012:capacity-provider/ExampleCapacityProvider", + "name": "ExampleCapacityProvider", + "status": "ACTIVE", + "autoScalingGroupProvider": { + "autoScalingGroupArn": "arn:aws:autoscaling:us-west-2:123456789012:autoScalingGroup:a1b2c3d4-5678-90ab-cdef-EXAMPLE11111:autoScalingGroupName/MyAutoScalingGroup", + "managedScaling": { + "status": "ENABLED", + "targetCapacity": 100, + "minimumScalingStepSize": 1, + "maximumScalingStepSize": 10000 + }, + "managedTerminationProtection": "DISABLED" + }, + "updateStatus": "DELETE_IN_PROGRESS", + "tags": [] + } + } + } + ] } }, "com.amazonaws.ecs#DeleteCapacityProviderRequest": { @@ -4050,7 +4191,50 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes one or more task definitions.\n         You must deregister a task definition revision before you delete it. For more\n\t\t\tinformation, see DeregisterTaskDefinition.\n         When you delete a task definition revision, it is immediately transitions from the\n\t\t\t\tINACTIVE to DELETE_IN_PROGRESS. Existing tasks and\n\t\t\tservices that reference a DELETE_IN_PROGRESS task definition revision\n\t\t\tcontinue to run without disruption. Existing services that reference a\n\t\t\t\tDELETE_IN_PROGRESS task definition revision can still scale up or down\n\t\t\tby modifying the service's desired count.\n         You can't use a DELETE_IN_PROGRESS task definition revision to run new\n\t\t\ttasks or create new services. You also can't update an existing service to reference a\n\t\t\t\tDELETE_IN_PROGRESS task definition revision.\n         A task definition revision will stay in DELETE_IN_PROGRESS status until\n\t\t\tall the associated tasks and services have been terminated.\n         When you delete all INACTIVE task definition revisions, the task\n\t\t\tdefinition name is not displayed in the console and not returned in the API. If a task\n\t\t\tdefinition revisions are in the DELETE_IN_PROGRESS state, the task\n\t\t\tdefinition name is displayed in the console and returned in the API. The task definition\n\t\t\tname is retained by Amazon ECS and the revision is incremented the next time you create a\n\t\t\ttask definition with that name."
+      "smithy.api#documentation": "Deletes one or more task definitions.\n         You must deregister a task definition revision before you delete it. For more\n\t\t\tinformation, see DeregisterTaskDefinition.\n         When you delete a task definition revision, it immediately transitions from\n\t\t\t\tINACTIVE to DELETE_IN_PROGRESS. Existing tasks and\n\t\t\tservices that reference a DELETE_IN_PROGRESS task definition revision\n\t\t\tcontinue to run without disruption. Existing services that reference a\n\t\t\t\tDELETE_IN_PROGRESS task definition revision can still scale up or down\n\t\t\tby modifying the service's desired count.\n         You can't use a DELETE_IN_PROGRESS task definition revision to run new\n\t\t\ttasks or create new services. You also can't update an existing service to reference a\n\t\t\t\tDELETE_IN_PROGRESS task definition revision.\n         A task definition revision will stay in DELETE_IN_PROGRESS status until\n\t\t\tall the associated tasks and services have been terminated.\n         When you delete all INACTIVE task definition revisions, the task\n\t\t\tdefinition name is not displayed in the console and not returned in the API. If any task\n\t\t\tdefinition revisions are in the DELETE_IN_PROGRESS state, the task\n\t\t\tdefinition name is displayed in the console and returned in the API. The task definition\n\t\t\tname is retained by Amazon ECS and the revision is incremented the next time you create a\n\t\t\ttask definition with that name.
", + "smithy.api#examples": [ + { + "title": "To delete a task definition that has been deregistered", + "documentation": "This example deletes a specified deregistered task definition. ", + "input": { + "taskDefinitions": [ + "Example-task-definition:1" + ] + }, + "output": { + "failures": [], + "taskDefinitions": [ + { + "containerDefinitions": [ + { + "command": [ + "apt-get update; apt-get install stress; while true; do stress --cpu $(( RANDOM % 4 )) -t $(( RANDOM % 10 )); done" + ], + "cpu": 50, + "entryPoint": [ + "bash", + "-c" + ], + "environment": [], + "essential": true, + "image": "ubuntu", + "memory": 100, + "mountPoints": [], + "name": "wave", + "portMappings": [], + "volumesFrom": [] + } + ], + "family": "cpu-wave", + "revision": 1, + "status": "DELETE_IN_PROGRESS", + "taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/Example-task-definition:1", + "volumes": [] + } + ] + } + } + ] } }, "com.amazonaws.ecs#DeleteTaskDefinitionsRequest": { @@ -4126,7 +4310,52 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a specified task set within a service. This is used when a service uses the\n\t\t\t\tEXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide."
+      "smithy.api#documentation": "Deletes a specified task set within a service. This is used when a service uses the\n\t\t\t\tEXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
", + "smithy.api#examples": [ + { + "title": "To delete a task set within a service that uses the EXTERNAL deployment controller type", + "documentation": "This example deletes a task set and uses the force flag to force deletion if it hasn't scaled to zero.", + "input": { + "cluster": "MyCluster", + "service": "MyService", + "taskSet": "arn:aws:ecs:us-west-2:123456789012:task-set/MyCluster/MyService/ecs-svc/1234567890123456789", + "force": true + }, + "output": { + "taskSet": { + "id": "ecs-svc/1234567890123456789", + "taskSetArn": "arn:aws:ecs:us-west-2:123456789012:task-set/MyCluster/MyService/ecs-svc/1234567890123456789", + "status": "DRAINING", + "taskDefinition": "arn:aws:ecs:us-west-2:123456789012:task-definition/sample-fargate:2", + "computedDesiredCount": 0, + "pendingCount": 0, + "runningCount": 0, + "createdAt": 1.557130260276E9, + "updatedAt": 1.557130290707E9, + "launchType": "EC2", + "networkConfiguration": { + "awsvpcConfiguration": { + "subnets": [ + "subnet-12345678" + ], + "securityGroups": [ + "sg-12345678" + ], + "assignPublicIp": "DISABLED" + } + }, + "loadBalancers": [], + "serviceRegistries": [], + "scale": { + "value": 0, + "unit": "PERCENT" + }, + "stabilityStatus": "STABILIZING", + "stabilityStatusAt": 1.557130290707E9 + } + } + } + ] } }, "com.amazonaws.ecs#DeleteTaskSetRequest": { @@ -4583,7 +4812,42 @@ } ], "traits": { - "smithy.api#documentation": "

Deregisters the specified task definition by family and revision. Upon deregistration,\n\t\t\tthe task definition is marked as INACTIVE. Existing tasks and services that\n\t\t\treference an INACTIVE task definition continue to run without disruption.\n\t\t\tExisting services that reference an INACTIVE task definition can still\n\t\t\tscale up or down by modifying the service's desired count. If you want to delete a task\n\t\t\tdefinition revision, you must first deregister the task definition revision.\n         You can't use an INACTIVE task definition to run new tasks or create new\n\t\t\tservices, and you can't update an existing service to reference an INACTIVE\n\t\t\ttask definition. However, there may be up to a 10-minute window following deregistration\n\t\t\twhere these restrictions have not yet taken effect.\n         At this time, INACTIVE task definitions remain discoverable in your\n\t\t\t\taccount indefinitely. However, this behavior is subject to change in the future. We\n\t\t\t\tdon't recommend that you rely on INACTIVE task definitions persisting\n\t\t\t\tbeyond the lifecycle of any associated tasks and services.\n         You must deregister a task definition revision before you delete it. For more\n\t\t\tinformation, see DeleteTaskDefinitions."
+      "smithy.api#documentation": "Deregisters the specified task definition by family and revision. Upon deregistration,\n\t\t\tthe task definition is marked as INACTIVE. Existing tasks and services that\n\t\t\treference an INACTIVE task definition continue to run without disruption.\n\t\t\tExisting services that reference an INACTIVE task definition can still\n\t\t\tscale up or down by modifying the service's desired count. If you want to delete a task\n\t\t\tdefinition revision, you must first deregister the task definition revision.\n         You can't use an INACTIVE task definition to run new tasks or create new\n\t\t\tservices, and you can't update an existing service to reference an INACTIVE\n\t\t\ttask definition. However, there may be up to a 10-minute window following deregistration\n\t\t\twhere these restrictions have not yet taken effect.\n         At this time, INACTIVE task definitions remain discoverable in your\n\t\t\t\taccount indefinitely. However, this behavior is subject to change in the future. We\n\t\t\t\tdon't recommend that you rely on INACTIVE task definitions persisting\n\t\t\t\tbeyond the lifecycle of any associated tasks and services.\n         You must deregister a task definition revision before you delete it. For more\n\t\t\tinformation, see DeleteTaskDefinitions.
", + "smithy.api#examples": [ + { + "title": "To deregister a revision of a task definition", + "documentation": "This example deregisters the first revision of the curler task definition", + "input": { + "taskDefinition": "curler:1" + }, + "output": { + "taskDefinition": { + "status": "INACTIVE", + "family": "curler", + "volumes": [], + "taskDefinitionArn": "arn:aws:ecs:us-west-2:123456789012:task-definition/curler:1", + "containerDefinitions": [ + { + "environment": [], + "name": "curler", + "mountPoints": [], + "image": "curl:latest", + "cpu": 100, + "portMappings": [], + "entryPoint": [], + "memory": 256, + "command": [ + "curl -v http://example.com/" + ], + "essential": true, + "volumesFrom": [] + } + ], + "revision": 1 + } + } + } + ] } }, "com.amazonaws.ecs#DeregisterTaskDefinitionRequest": { @@ -4635,7 +4899,82 @@ } ], "traits": { - "smithy.api#documentation": "

Describes one or more of your capacity providers."
+      "smithy.api#documentation": "Describes one or more of your capacity providers.
", + "smithy.api#examples": [ + { + "title": "To describe a specific capacity provider", + "documentation": "This example retrieves details about the capacity provider MyCapacityProvider", + "input": { + "capacityProviders": [ + "MyCapacityProvider" + ], + "include": [ + "TAGS" + ] + }, + "output": { + "capacityProviders": [ + { + "capacityProviderArn": "arn:aws:ecs:us-west-2:123456789012:capacity-provider/MyCapacityProvider", + "name": "MyCapacityProvider", + "status": "ACTIVE", + "autoScalingGroupProvider": { + "autoScalingGroupArn": "arn:aws:autoscaling:us-west-2:123456789012:autoScalingGroup:a1b2c3d4-5678-90ab-cdef-EXAMPLE11111:autoScalingGroupName/MyAutoScalingGroup", + "managedScaling": { + "status": "ENABLED", + "targetCapacity": 100, + "minimumScalingStepSize": 1, + "maximumScalingStepSize": 1000 + }, + "managedTerminationProtection": "ENABLED" + }, + "tags": [ + { + "key": "environment", + "value": "production" + } + ] + } + ] + } + }, + { + "title": "To describe all capacity providers", + "documentation": "This example retrieves details about all capacity providers. ", + "output": { + "capacityProviders": [ + { + "capacityProviderArn": "arn:aws:ecs:us-west-2:123456789012:capacity-provider/MyCapacityProvider", + "name": "MyCapacityProvider", + "status": "ACTIVE", + "autoScalingGroupProvider": { + "autoScalingGroupArn": "arn:aws:autoscaling:us-west-2:123456789012:autoScalingGroup:a1b2c3d4-5678-90ab-cdef-EXAMPLE11111:autoScalingGroupName/MyAutoScalingGroup", + "managedScaling": { + "status": "ENABLED", + "targetCapacity": 100, + "minimumScalingStepSize": 1, + "maximumScalingStepSize": 1000 + }, + "managedTerminationProtection": "ENABLED" + }, + "tags": [] + }, + { + "capacityProviderArn": "arn:aws:ecs:us-west-2:123456789012:capacity-provider/FARGATE", + "name": "FARGATE", + "status": "ACTIVE", + "tags": [] + }, + { + "capacityProviderArn": "arn:aws:ecs:us-west-2:123456789012:capacity-provider/FARGATE_SPOT", + "name": "FARGATE_SPOT", + "status": "ACTIVE", + "tags": [] + } + ] + } + } + ] } }, "com.amazonaws.ecs#DescribeCapacityProvidersRequest": { @@ -5426,7 +5765,56 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the task sets in the specified cluster and service. This is used when a\n\t\t\tservice uses the EXTERNAL deployment controller type. For more information,\n\t\t\tsee Amazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide."
+      "smithy.api#documentation": "Describes the task sets in the specified cluster and service. This is used when a\n\t\t\tservice uses the EXTERNAL deployment controller type. For more information,\n\t\t\tsee Amazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.
", + "smithy.api#examples": [ + { + "title": "To describe a task set ", + "documentation": "This example describes a task set in service MyService that uses an EXTERNAL deployment controller. ", + "input": { + "cluster": "MyCluster", + "service": "MyService", + "taskSets": [ + "arn:aws:ecs:us-west-2:123456789012:task-set/MyCluster/MyService/ecs-svc/1234567890123456789" + ] + }, + "output": { + "taskSets": [ + { + "id": "ecs-svc/1234567890123456789", + "taskSetArn": "arn:aws:ecs:us-west-2:123456789012:task-set/MyCluster/MyService/ecs-svc/1234567890123456789", + "status": "ACTIVE", + "taskDefinition": "arn:aws:ecs:us-west-2:123456789012:task-definition/sample-fargate:2", + "computedDesiredCount": 0, + "pendingCount": 0, + "runningCount": 0, + "createdAt": 1.557207715195E9, + "updatedAt": 1.557207740014E9, + "launchType": "EC2", + "networkConfiguration": { + "awsvpcConfiguration": { + "subnets": [ + "subnet-12344321" + ], + "securityGroups": [ + "sg-1234431" + ], + "assignPublicIp": "DISABLED" + } + }, + "loadBalancers": [], + "serviceRegistries": [], + "scale": { + "value": 0, + "unit": "PERCENT" + }, + "stabilityStatus": "STEADY_STATE", + "stabilityStatusAt": 1.557207740014E9 + } + ], + "failures": [] + } + } + ] } }, "com.amazonaws.ecs#DescribeTaskSetsRequest": { @@ -6107,7 +6495,32 @@ } ], "traits": { - "smithy.api#documentation": "

Runs a command remotely on a container within a task.\n         If you use a condition key in your IAM policy to refine the conditions for the\n\t\t\tpolicy statement, for example limit the actions to a specific cluster, you receive an\n\t\t\t\tAccessDeniedException when there is a mismatch between the condition\n\t\t\tkey value and the corresponding parameter value.\n         For information about required permissions and considerations, see Using Amazon ECS\n\t\t\t\tExec for debugging in the Amazon ECS Developer Guide.\n\t\t"
+      "smithy.api#documentation": "Runs a command remotely on a container within a task.\n         If you use a condition key in your IAM policy to refine the conditions for the\n\t\t\tpolicy statement, for example, limit the actions to a specific cluster, you receive an\n\t\t\t\tAccessDeniedException when there is a mismatch between the condition\n\t\t\tkey value and the corresponding parameter value.\n         For information about required permissions and considerations, see Using Amazon ECS\n\t\t\t\tExec for debugging in the Amazon ECS Developer Guide.\n\t\t
", + "smithy.api#examples": [ + { + "title": "To run a command remotely on a container in a task", + "documentation": "This example runs an interactive /bin/sh command on a container MyContainer. ", + "input": { + "cluster": "MyCluster", + "container": "MyContainer", + "command": "/bin/sh", + "interactive": true, + "task": "arn:aws:ecs:us-east-1:123456789012:task/MyCluster/d789e94343414c25b9f6bd59eEXAMPLE" + }, + "output": { + "clusterArn": "arn:aws:ecs:us-east-1:123456789012:cluster/MyCluster", + "containerArn": "arn:aws:ecs:us-east-1:123456789012:container/MyCluster/d789e94343414c25b9f6bd59eEXAMPLE/43ba4b77-37f7-4a41-b923-69d4abEXAMPLE", + "containerName": "MyContainer", + "interactive": true, + "session": { + "sessionId": "ecs-execute-command-794nnsxobsg4p2hiur6gxu9a9e", + "streamUrl": "wss://ssmmessages.us-east-1.amazonaws.com/v1/data-channel/ecs-execute-command-794nnsxobsg4p2hiur6gxu9a9e?role=publish_subscribe&cell-number=AAEAAfiZG4oybxqsYj3Zhm15s4J0W1k7d9nxVRenNO8Kl5nzAAAAAGdbWGl479/y/4IrTWPadUic3eBrMu3vmB7aPvI+s12lbpDc142y1KZy", + "tokenValue": "AAEAAcVb7ww10N9aNUI5Cl7K7DbHjbD2Ed4Mw6uaGYIc+UFNAAAAAGdbWGmMDaPbGfDkzrVIhyKEsc4CPT2hcToPU6yzlddPm7rRZvYQtpaAgsvQdjbCAd9OB6ohtDYfqZI9gzMqLKegXq0E+KbDcGPnQVODFNHmQxnR1BvC6vNcHqh6HAJuKnQD7RSYx/J5bfYNHj4hCYHuN0HNcueSDOOTRB/MBt5DBDY7Djv2uzs9FD0N1kcsGljZkZWLuPTVKHHyrU3zh0awfrFFC3RXvgaUCBnloIIvZeq2CjTesxn9JJS+3N4I0DVxfkHdWWBbBY/5+wH82JVTJpqN3yOAt74u/W7TvYBd7Xu2lQbvtpuAnEszl++bFG2ZoV3dfnmBkSnfD/qV1FJcEskbxUHKgmqe0Paouv4zwrQKNfWYfcv9xkWskqcExh07IeaxZz1tp/WegZ5D76sD6xYeuH+35TMNXMoY7oudLgxIXsA7b39ElM7orGi4Jy3W2tLyuNIvDoI2JI6ww4tYdEjYZnld9rhKwV9rDHk1Z8wjHMs++3BIkHrFQRsv7BFUWlZ9lyqO9GWlXeBe7dQtOeFNahBuJUE9z/xLHJn1x13VkdROKqUVHTJrT4sXAnI5roWiGPoQPVY7aHVYJnwjSxrPRWJBsgyHiVN3dAWTmeVMjp0VbOiJaLlpBI+AUWs8OeVRzuJSZ+1alETpK7Ukag7ma9K4lxq/N7IxYo2ub0cG/bvX42zQqdJAW+9St9sQ1QMaMvkSq1tdbLoOuY0QjN7JrkuKLFQA5bhs+o1YwItzIp7bNrzQ9Z9IN51qoGL5HDXQzi1kNFfYtAryhwt6BgtQU9Z0k+RpE+V5G+V68E0MMUvb313f0nRBYj1u5VKonWb708wADPbUU+s7nvbWuD5oLp1Z6A4iqI9Om0R4RrFASj/7fVY7r3raNXcIYA=" + }, + "taskArn": "arn:aws:ecs:us-east-1:123456789012:task/MyCluster/d789e94343414c25b9f6bd59eEXAMPLE" + } + } + ] } }, "com.amazonaws.ecs#ExecuteCommandConfiguration": { @@ -7105,6 +7518,27 @@ ], "traits": { "smithy.api#documentation": "

Lists the attributes for Amazon ECS resources within a specified target type and cluster.\n\t\t\tWhen you specify a target type and cluster, ListAttributes returns a list\n\t\t\tof attribute objects, one for each attribute on each resource. You can filter the list\n\t\t\tof results to a single attribute name to only return results that have that name. You\n\t\t\tcan also filter the results by attribute name and value. You can do this, for example,\n\t\t\tto see which container instances in a cluster are running a Linux AMI\n\t\t\t\t(ecs.os-type=linux).",
", + "smithy.api#examples": [ + { + "title": "To list container instances that have a specific attribute", + "documentation": "This example lists attributes for a container instance with the attribute \"stack\" equal to the value \"production\".", + "input": { + "cluster": "MyCluster", + "targetType": "container-instance", + "attributeName": "stack", + "attributeValue": "production" + }, + "output": { + "attributes": [ + { + "name": "stack", + "targetId": "arn:aws:ecs:us-west-2:123456789012:container-instance/1c3be8ed-df30-47b4-8f1e-6e68ebd01f34", + "value": "production" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -7406,7 +7840,31 @@ } ], "traits": { - "smithy.api#documentation": "

-      "smithy.api#documentation": "This operation lists all the service deployments that meet the specified filter\n\t\t\tcriteria.\n         A service deployment happens when you release a softwre update for the service. You\n\t\t\troute traffic from the running service revisions to the new service revison and control\n\t\t\tthe number of running tasks.\n         This API returns the values that you use for the request parameters in DescribeServiceRevisions."
+      "smithy.api#documentation": "This operation lists all the service deployments that meet the specified filter\n\t\t\tcriteria.\n         A service deployment happens when you release a software update for the service. You\n\t\t\troute traffic from the running service revisions to the new service revision and control\n\t\t\tthe number of running tasks.\n         This API returns the values that you use for the request parameters in DescribeServiceRevisions.
", + "smithy.api#examples": [ + { + "title": "To list service deployments that meet the specified criteria", + "documentation": "This example lists all successful service deployments for the service \"sd-example\" in the cluster \"example\".", + "input": { + "service": "sd-example", + "cluster": "example", + "status": [ + "SUCCESSFUL" + ] + }, + "output": { + "serviceDeployments": [ + { + "serviceDeploymentArn": "arn:aws:ecs:us-west-2:123456789012:service-deployment/example/sd-example/NCWGC2ZR-taawPAYrIaU5", + "serviceArn": "arn:aws:ecs:us-west-2:123456789012:service/example/sd-example", + "clusterArn": "arn:aws:ecs:us-west-2:123456789012:cluster/example", + "targetServiceRevisionArn": "arn:aws:ecs:us-west-2:123456789012:service-revision/example/sd-example/4980306466373577095", + "status": "SUCCESSFUL" + } + ] + } + } + ] } }, "com.amazonaws.ecs#ListServiceDeploymentsRequest": { @@ -9245,7 +9703,32 @@ } ], "traits": { - "smithy.api#documentation": "

Create or update an attribute on an Amazon ECS resource. If the attribute doesn't exist,\n\t\t\tit's created. If the attribute exists, its value is replaced with the specified value.\n\t\t\tTo delete an attribute, use DeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide."
+      "smithy.api#documentation": "Create or update an attribute on an Amazon ECS resource. If the attribute doesn't exist,\n\t\t\tit's created. If the attribute exists, its value is replaced with the specified value.\n\t\t\tTo delete an attribute, use DeleteAttributes. For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.",
", + "smithy.api#examples": [ + { + "title": "To create or update an attribute on a resource", + "documentation": "This example adds an attribute \"stack\" with the value \"production\" to a container instance.", + "input": { + "cluster": "MyCluster", + "attributes": [ + { + "targetId": "arn:aws:ecs:us-west-2:123456789012:container-instance/1c3be8ed-df30-47b4-8f1e-6e68ebd01f34", + "name": "stack", + "value": "production" + } + ] + }, + "output": { + "attributes": [ + { + "name": "stack", + "targetId": "arn:aws:ecs:us-west-2:123456789012:container-instance/1c3be8ed-df30-47b4-8f1e-6e68ebd01f34", + "value": "production" + } + ] + } + } + ] } }, "com.amazonaws.ecs#PutAttributesRequest": { @@ -9312,40 +9795,273 @@ } ], "traits": { - "smithy.api#documentation": "

-      "smithy.api#documentation": "Modifies the available capacity providers and the default capacity provider strategy\n\t\t\tfor a cluster.\n         You must specify both the available capacity providers and a default capacity provider\n\t\t\tstrategy for the cluster. If the specified cluster has existing capacity providers\n\t\t\tassociated with it, you must specify all existing capacity providers in addition to any\n\t\t\tnew ones you want to add. Any existing capacity providers that are associated with a\n\t\t\tcluster that are omitted from a PutClusterCapacityProviders API call will be disassociated with the\n\t\t\tcluster. You can only disassociate an existing capacity provider from a cluster if it's\n\t\t\tnot being used by any existing tasks.\n         When creating a service or running a task on a cluster, if no capacity provider or\n\t\t\tlaunch type is specified, then the cluster's default capacity provider strategy is used.\n\t\t\tWe recommend that you define a default capacity provider strategy for your cluster.\n\t\t\tHowever, you must specify an empty array ([]) to bypass defining a default\n\t\t\tstrategy."
-    }
-  },
-  "com.amazonaws.ecs#PutClusterCapacityProvidersRequest": {
-    "type": "structure",
-    "members": {
-      "cluster": {
-        "target": "com.amazonaws.ecs#String",
-        "traits": {
-          "smithy.api#documentation": "The short name or full Amazon Resource Name (ARN) of the cluster to modify the capacity provider\n\t\t\tsettings for. If you don't specify a cluster, the default cluster is assumed.",
-          "smithy.api#required": {}
-        }
-      },
-      "capacityProviders": {
-        "target": "com.amazonaws.ecs#StringList",
-        "traits": {
-          "smithy.api#documentation": "The name of one or more capacity providers to associate with the cluster.\n         If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.\n         To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.",
-          "smithy.api#required": {}
-        }
-      },
-      "defaultCapacityProviderStrategy": {
-        "target": "com.amazonaws.ecs#CapacityProviderStrategy",
-        "traits": {
-          "smithy.api#documentation": "The capacity provider strategy to use by default for the cluster.\n         When creating a service or running a task on a cluster, if no capacity provider or\n\t\t\tlaunch type is specified then the default capacity provider strategy for the cluster is\n\t\t\tused.\n         A capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase and weight to assign to them. A capacity provider\n\t\t\tmust be associated with the cluster to be used in a capacity provider strategy. The\n\t\t\t\tPutClusterCapacityProviders API is used to associate a capacity provider\n\t\t\twith a cluster. Only capacity providers with an ACTIVE or\n\t\t\t\tUPDATING status can be used.\n         If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.\n         To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.",
-          "smithy.api#required": {}
-        }
-      }
-    },
-    "traits": {
-      "smithy.api#input": {}
-    }
-  },
-  "com.amazonaws.ecs#PutClusterCapacityProvidersResponse": {
-    "type": "structure",
+      "smithy.api#documentation": "Modifies the available capacity providers and the default capacity provider strategy\n\t\t\tfor a cluster.\n         You must specify both the available capacity providers and a default capacity provider\n\t\t\tstrategy for the cluster. If the specified cluster has existing capacity providers\n\t\t\tassociated with it, you must specify all existing capacity providers in addition to any\n\t\t\tnew ones you want to add. Any existing capacity providers that are associated with a\n\t\t\tcluster that are omitted from a PutClusterCapacityProviders API call will be disassociated from the\n\t\t\tcluster. You can only disassociate an existing capacity provider from a cluster if it's\n\t\t\tnot being used by any existing tasks.\n         When creating a service or running a task on a cluster, if no capacity provider or\n\t\t\tlaunch type is specified, then the cluster's default capacity provider strategy is used.\n\t\t\tWe recommend that you define a default capacity provider strategy for your cluster.\n\t\t\tHowever, you must specify an empty array ([]) to bypass defining a default\n\t\t\tstrategy.
", + "smithy.api#examples": [ + { + "title": "To add an existing capacity provider to a cluuster", + "documentation": "This example adds an existing capacity provider \"MyCapacityProvider2\" to a cluster that already has the capacity provider \"MyCapacityProvider1\" associated with it. Both \"MyCapacityProvider2\" and \"MyCapacityProvider1\" need to be specified. ", + "input": { + "cluster": "MyCluster", + "capacityProviders": [ + "MyCapacityProvider1", + "MyCapacityProvider2" + ], + "defaultCapacityProviderStrategy": [ + { + "capacityProvider": "MyCapacityProvider1", + "weight": 1 + }, + { + "capacityProvider": "MyCapacityProvider2", + "weight": 1 + } + ] + }, + "output": { + "cluster": { + "clusterArn": "arn:aws:ecs:us-west-2:123456789012:cluster/MyCluster", + "clusterName": "MyCluster", + "status": "ACTIVE", + "registeredContainerInstancesCount": 0, + "runningTasksCount": 0, + "pendingTasksCount": 0, + "activeServicesCount": 0, + "statistics": [], + "tags": [], + "settings": [ + { + "name": "containerInsights", + "value": "enabled" + } + ], + "capacityProviders": [ + "MyCapacityProvider1", + "MyCapacityProvider2" + ], + "defaultCapacityProviderStrategy": [ + { + "capacityProvider": "MyCapacityProvider1", + "weight": 1, + "base": 0 + }, + { + "capacityProvider": "MyCapacityProvider2", + "weight": 1, + "base": 0 + } + ], + "attachments": [ + { + "id": "0fb0c8f4-6edd-4de1-9b09-17e470ee1918", + "type": "as_policy", + "status": "ACTIVE", + "details": [ + { + "name": "capacityProviderName", + "value": "MyCapacityProvider1" + }, + { + "name": "scalingPolicyName", + "value": "ECSManagedAutoScalingPolicy-a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } + ] + }, + { + "id": "ae592060-2382-4663-9476-b015c685593c", + "type": "as_policy", + "status": "ACTIVE", + "details": [ + { + "name": "capacityProviderName", + "value": "MyCapacityProvider2" + }, + { + "name": "scalingPolicyName", + "value": "ECSManagedAutoScalingPolicy-a1b2c3d4-5678-90ab-cdef-EXAMPLE22222" + } + ] + } + ], + "attachmentsStatus": "UPDATE_IN_PROGRESS" + } + } + }, + { + "title": "To remove a capacity provider from a cluster", + "documentation": "This example removes a capacity provider \"MyCapacityProvider2\" from a cluster that has both \"MyCapacityProvider2\" and \"MyCapacityProvider1\" associated with it. Only \"MyCapacityProvider1\" needs to be specified in this scenario. 
", + "input": { + "cluster": "MyCluster", + "capacityProviders": [ + "MyCapacityProvider1" + ], + "defaultCapacityProviderStrategy": [ + { + "capacityProvider": "MyCapacityProvider1", + "weight": 1, + "base": 0 + } + ] + }, + "output": { + "cluster": { + "clusterArn": "arn:aws:ecs:us-west-2:123456789012:cluster/MyCluster", + "clusterName": "MyCluster", + "status": "ACTIVE", + "registeredContainerInstancesCount": 0, + "runningTasksCount": 0, + "pendingTasksCount": 0, + "activeServicesCount": 0, + "statistics": [], + "tags": [], + "settings": [ + { + "name": "containerInsights", + "value": "enabled" + } + ], + "capacityProviders": [ + "MyCapacityProvider1" + ], + "defaultCapacityProviderStrategy": [ + { + "capacityProvider": "MyCapacityProvider1", + "weight": 1, + "base": 0 + } + ], + "attachments": [ + { + "id": "0fb0c8f4-6edd-4de1-9b09-17e470ee1918", + "type": "as_policy", + "status": "ACTIVE", + "details": [ + { + "name": "capacityProviderName", + "value": "MyCapacityProvider1" + }, + { + "name": "scalingPolicyName", + "value": "ECSManagedAutoScalingPolicy-a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } + ] + }, + { + "id": "ae592060-2382-4663-9476-b015c685593c", + "type": "as_policy", + "status": "DELETING", + "details": [ + { + "name": "capacityProviderName", + "value": "MyCapacityProvider2" + }, + { + "name": "scalingPolicyName", + "value": "ECSManagedAutoScalingPolicy-a1b2c3d4-5678-90ab-cdef-EXAMPLE22222" + } + ] + } + ], + "attachmentsStatus": "UPDATE_IN_PROGRESS" + } + } + }, + { + "title": "To remove all capacity providers from a cluster", + "documentation": "This example removes all capacity providers associated with a cluster. ", + "input": { + "cluster": "MyCluster", + "capacityProviders": [], + "defaultCapacityProviderStrategy": [] + }, + "output": { + "cluster": { + "clusterArn": "arn:aws:ecs:us-west-2:123456789012:cluster/MyCluster", + "clusterName": "MyCluster", + "status": "ACTIVE", + "registeredContainerInstancesCount": 0, + "runningTasksCount": 0, + "pendingTasksCount": 0, + "activeServicesCount": 0, + "statistics": [], + "tags": [], + "settings": [ + { + "name": "containerInsights", + "value": "enabled" + } + ], + "capacityProviders": [], + "defaultCapacityProviderStrategy": [], + "attachments": [ + { + "id": "0fb0c8f4-6edd-4de1-9b09-17e470ee1918", + "type": "as_policy", + "status": "DELETING", + "details": [ + { + "name": "capacityProviderName", + "value": "MyCapacityProvider1" + }, + { + "name": "scalingPolicyName", + "value": "ECSManagedAutoScalingPolicy-a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + } + ] + }, + { + "id": "ae592060-2382-4663-9476-b015c685593c", + "type": "as_policy", + "status": "DELETING", + "details": [ + { + "name": "capacityProviderName", + "value": "MyCapacityProvider2" + }, + { + "name": "scalingPolicyName", + "value": "ECSManagedAutoScalingPolicy-a1b2c3d4-5678-90ab-cdef-EXAMPLE22222" + } + ] + } + ], + "attachmentsStatus": "UPDATE_IN_PROGRESS" + } + } + } + ] + } + }, + "com.amazonaws.ecs#PutClusterCapacityProvidersRequest": { + "type": "structure", + "members": { + "cluster": { + "target": "com.amazonaws.ecs#String", + "traits": { + "smithy.api#documentation": "

The short name or full Amazon Resource Name (ARN) of the cluster to modify the capacity provider\n\t\t\tsettings for. If you don't specify a cluster, the default cluster is assumed.",
+          "smithy.api#required": {}
+        }
+      },
+      "capacityProviders": {
+        "target": "com.amazonaws.ecs#StringList",
+        "traits": {
+          "smithy.api#documentation": "The name of one or more capacity providers to associate with the cluster.\n         If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.\n         To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.",
+          "smithy.api#required": {}
+        }
+      },
+      "defaultCapacityProviderStrategy": {
+        "target": "com.amazonaws.ecs#CapacityProviderStrategy",
+        "traits": {
+          "smithy.api#documentation": "The capacity provider strategy to use by default for the cluster.\n         When creating a service or running a task on a cluster, if no capacity provider or\n\t\t\tlaunch type is specified then the default capacity provider strategy for the cluster is\n\t\t\tused.\n         A capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase and weight to assign to them. A capacity provider\n\t\t\tmust be associated with the cluster to be used in a capacity provider strategy. The\n\t\t\t\tPutClusterCapacityProviders API is used to associate a capacity provider\n\t\t\twith a cluster. Only capacity providers with an ACTIVE or\n\t\t\t\tUPDATING status can be used.\n         If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.\n         To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ecs#PutClusterCapacityProvidersResponse": { + "type": "structure", "members": { "cluster": { "target": "com.amazonaws.ecs#Cluster", @@ -11481,7 +12197,59 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a new task from the specified task definition on the specified container\n\t\t\tinstance or instances.\n         On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.\n         Amazon Elastic Inference (EI) is no longer available to customers.\n         Alternatively, you can useRunTask to place tasks for you. For more\n\t\t\tinformation, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.\n         You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide."
+      "smithy.api#documentation": "Starts a new task from the specified task definition on the specified container\n\t\t\tinstance or instances.\n         On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.\n         Amazon Elastic Inference (EI) is no longer available to customers.\n         Alternatively, you can use RunTask to place tasks for you. For more\n\t\t\tinformation, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.\n         You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
", + "smithy.api#examples": [ + { + "title": "To start a new task", + "documentation": "This example starts a new task in the cluster \"MyCluster\" on the specified container instance using the latest revision of the \"hello-world\" task definition.", + "input": { + "cluster": "MyCluster", + "containerInstances": [ + "4c543eed-f83f-47da-b1d8-3d23f1da4c64" + ], + "taskDefinition": "hello-world" + }, + "output": { + "tasks": [ + { + "clusterArn": "arn:aws:ecs:us-east-1:012345678910:cluster/default", + "containerInstanceArn": "arn:aws:ecs:us-east-1:012345678910:container-instance/default/4c543eed-f83f-47da-b1d8-3d23f1da4c64", + "containers": [ + { + "containerArn": "arn:aws:ecs:us-east-1:012345678910:container/e76594d4-27e1-4c74-98b5-46a6435eb769", + "lastStatus": "PENDING", + "name": "wordpress", + "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/default/fdf2c302-468c-4e55-b884-5331d816e7fb" + }, + { + "containerArn": "arn:aws:ecs:us-east-1:012345678910:container/default/b19106ea-4fa8-4f1d-9767-96922c82b070", + "lastStatus": "PENDING", + "name": "mysql", + "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/default/fdf2c302-468c-4e55-b884-5331d816e7fb" + } + ], + "createdAt": 1.479765460842E9, + "desiredStatus": "RUNNING", + "lastStatus": "PENDING", + "overrides": { + "containerOverrides": [ + { + "name": "wordpress" + }, + { + "name": "mysql" + } + ] + }, + "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/default/fdf2c302-468c-4e55-b884-5331d816e7fb", + "taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/hello_world:6", + "version": 1 + } + ], + "failures": [] + } + } + ] } }, "com.amazonaws.ecs#StartTaskRequest": { @@ -11623,7 +12391,66 @@ } ], "traits": { - "smithy.api#documentation": "

Stops a running task. Any tags associated with the task will be deleted.\n         When you call StopTask on a task, the equivalent of docker\n\t\t\t\tstop is issued to the containers running in the task. This results in a\n\t\t\t\tSIGTERM value and a default 30-second timeout, after which the\n\t\t\t\tSIGKILL value is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL value is sent.\n         For Windows containers, POSIX signals do not work and runtime stops the container by\n\t\t\tsending a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown\n\t\t\t\tof (Windows) container #25982 on GitHub.\n         The default 30-second timeout can be configured on the Amazon ECS container agent with\n\t\t\t\tthe ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see\n\t\t\t\t\tAmazon ECS Container Agent Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide."
+      "smithy.api#documentation": "Stops a running task. Any tags associated with the task will be deleted.\n         When you call StopTask on a task, the equivalent of docker\n\t\t\t\tstop is issued to the containers running in the task. This results in a\n\t\t\t\tSIGTERM value and a default 30-second timeout, after which the\n\t\t\t\tSIGKILL value is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL value is sent.\n         For Windows containers, POSIX signals do not work and the runtime stops the container by\n\t\t\tsending a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown\n\t\t\t\tof (Windows) container #25982 on GitHub.\n         The default 30-second timeout can be configured on the Amazon ECS container agent with\n\t\t\t\tthe ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see\n\t\t\t\t\tAmazon ECS Container Agent Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
", + "smithy.api#examples": [ + { + "title": "To stop a task", + "documentation": "This example stops a task with ID \"1dc5c17a-422b-4dc4-b493-371970c6c4d6\" in cluster \"MyCluster\".", + "input": { + "cluster": "MyCluster", + "task": "1dc5c17a-422b-4dc4-b493-371970c6c4d6", + "reason": "testing stop task." + }, + "output": { + "task": { + "clusterArn": "arn:aws:ecs:us-east-1:012345678910:cluster/MyCluster", + "containerInstanceArn": "arn:aws:ecs:us-east-1:012345678910:container-instance/MyCluster/5991d8da-1d59-49d2-a31f-4230f9e73140", + "containers": [ + { + "containerArn": "arn:aws:ecs:us-east-1:012345678910:container/4df26bb4-f057-467b-a079-961675296e64", + "lastStatus": "RUNNING", + "name": "simple-app", + "networkBindings": [ + { + "bindIP": "0.0.0.0", + "containerPort": 80, + "hostPort": 32774, + "protocol": "tcp" + } + ], + "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/MyCluster/1dc5c17a-422b-4dc4-b493-371970c6c4d6" + }, + { + "containerArn": "arn:aws:ecs:us-east-1:012345678910:container/e09064f7-7361-4c87-8ab9-8d073bbdbcb9", + "lastStatus": "RUNNING", + "name": "busybox", + "networkBindings": [], + "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/MyCluster/1dc5c17a-422b-4dc4-b493-371970c6c4d6" + } + ], + "createdAt": 1.476822811295E9, + "desiredStatus": "STOPPED", + "lastStatus": "RUNNING", + "overrides": { + "containerOverrides": [ + { + "name": "simple-app" + }, + { + "name": "busybox" + } + ] + }, + "startedAt": 1.476822833998E9, + "startedBy": "ecs-svc/9223370560032507596", + "stoppedReason": "testing stop task.", + "taskArn": "arn:aws:ecs:us-east-1:012345678910:task/1dc5c17a-422b-4dc4-b493-371970c6c4d6", + "taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/console-sample-app-dynamic-ports:1", + "version": 0 + } + } + } + ] } }, "com.amazonaws.ecs#StopTaskRequest": { @@ -13436,7 +14263,43 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies the parameters for a capacity provider."
+      "smithy.api#documentation": "Modifies the parameters for a capacity provider.",
", + "smithy.api#examples": [ + { + "title": "To update a capacity provider's parameters", + "documentation": "This example updates the targetCapacity and instanceWarmupPeriod parameters for the capacity provider MyCapacityProvider to 90 and 150 respectively.", + "input": { + "name": "MyCapacityProvider", + "autoScalingGroupProvider": { + "managedScaling": { + "status": "ENABLED", + "targetCapacity": 90, + "instanceWarmupPeriod": 150 + } + } + }, + "output": { + "capacityProvider": { + "capacityProviderArn": "arn:aws:ecs:us-east-1:123456789012:capacity-provider/MyCapacityProvider", + "name": "MyCapacityProvider", + "status": "ACTIVE", + "autoScalingGroupProvider": { + "autoScalingGroupArn": "arn:aws:autoscaling:us-east-1:132456789012:autoScalingGroup:57ffcb94-11f0-4d6d-bf60-3bac5EXAMPLE:autoScalingGroupName/MyASG", + "managedScaling": { + "status": "ENABLED", + "targetCapacity": 90, + "minimumScalingStepSize": 1, + "maximumScalingStepSize": 10000, + "instanceWarmupPeriod": 150 + }, + "managedTerminationProtection": "ENABLED" + }, + "updateStatus": "UPDATE_COMPLETE", + "tags": [] + } + } + } + ] } }, "com.amazonaws.ecs#UpdateCapacityProviderRequest": { @@ -13501,7 +14364,186 @@ } ], "traits": { - "smithy.api#documentation": "

-      "smithy.api#documentation": "Updates the cluster."
+      "smithy.api#documentation": "Updates the cluster.
", + "smithy.api#examples": [ + { + "title": "To update a cluster's observability settings.", + "documentation": "This example turns on enhanced containerInsights in an existing cluster. ", + "input": { + "cluster": "ECS-project-update-cluster", + "settings": [ + { + "name": "containerInsights", + "value": "enhanced" + } + ] + }, + "output": { + "cluster": { + "clusterArn": "arn:aws:ecs:us-west-2:123456789012:cluster/ECS-project-update-cluster", + "clusterName": "ECS-project-update-cluster", + "status": "ACTIVE", + "registeredContainerInstancesCount": 0, + "runningTasksCount": 0, + "pendingTasksCount": 0, + "activeServicesCount": 0, + "statistics": [], + "tags": [], + "settings": [ + { + "name": "containerInsights", + "value": "enhanced" + } + ], + "capacityProviders": [ + "Infra-ECS-Cluster-ECS-project-update-cluster-d6bb6d5b-EC2CapacityProvider-3fIpdkLywwFt" + ], + "defaultCapacityProviderStrategy": [ + { + "capacityProvider": "Infra-ECS-Cluster-ECS-project-update-cluster-d6bb6d5b-EC2CapacityProvider-3fIpdkLywwFt", + "weight": 1, + "base": 0 + } + ], + "attachments": [ + { + "id": "069d002b-7634-42e4-b1d4-544f4c8f6380", + "type": "as_policy", + "status": "CREATED", + "details": [ + { + "name": "capacityProviderName", + "value": "Infra-ECS-Cluster-ECS-project-update-cluster-d6bb6d5b-EC2CapacityProvider-3fIpdkLywwFt" + }, + { + "name": "scalingPolicyName", + "value": "ECSManagedAutoScalingPolicy-152363a6-8c65-484c-b721-42c3e070ae93" + } + ] + }, + { + "id": "08b5b6ca-45e9-4209-a65d-e962a27c490a", + "type": "managed_draining", + "status": "CREATED", + "details": [ + { + "name": "capacityProviderName", + "value": "Infra-ECS-Cluster-ECS-project-update-cluster-d6bb6d5b-EC2CapacityProvider-3fIpdkLywwFt" + }, + { + "name": "autoScalingLifecycleHookName", + "value": "ecs-managed-draining-termination-hook" + } + ] + }, + { + "id": "45d0b36f-8cff-46b6-9380-1288744802ab", + "type": "sc", + "status": "ATTACHED", + "details": [] + } + ], + "attachmentsStatus": "UPDATE_COMPLETE", + "serviceConnectDefaults": { + "namespace": "arn:aws:servicediscovery:us-west-2:123456789012:namespace/ns-igwrsylmy3kwvcdx" + } + } + } + }, + { + "title": "To update a cluster's Service Connect defaults.", + "documentation": "This example sets a default Service Connect namespace. 
", + "input": { + "cluster": "ECS-project-update-cluster", + "serviceConnectDefaults": { + "namespace": "test" + } + }, + "output": { + "cluster": { + "clusterArn": "arn:aws:ecs:us-west-2:123456789012:cluster/ECS-project-update-cluster", + "clusterName": "ECS-project-update-cluster", + "status": "ACTIVE", + "registeredContainerInstancesCount": 0, + "runningTasksCount": 0, + "pendingTasksCount": 0, + "activeServicesCount": 0, + "statistics": [], + "tags": [], + "settings": [ + { + "name": "containerInsights", + "value": "enhanced" + } + ], + "capacityProviders": [ + "Infra-ECS-Cluster-ECS-project-update-cluster-d6bb6d5b-EC2CapacityProvider-3fIpdkLywwFt" + ], + "defaultCapacityProviderStrategy": [ + { + "capacityProvider": "Infra-ECS-Cluster-ECS-project-update-cluster-d6bb6d5b-EC2CapacityProvider-3fIpdkLywwFt", + "weight": 1, + "base": 0 + } + ], + "attachments": [ + { + "id": "069d002b-7634-42e4-b1d4-544f4c8f6380", + "type": "as_policy", + "status": "CREATED", + "details": [ + { + "name": "capacityProviderName", + "value": "Infra-ECS-Cluster-ECS-project-update-cluster-d6bb6d5b-EC2CapacityProvider-3fIpdkLywwFt" + }, + { + "name": "scalingPolicyName", + "value": "ECSManagedAutoScalingPolicy-152363a6-8c65-484c-b721-42c3e070ae93" + } + ] + }, + { + "id": "08b5b6ca-45e9-4209-a65d-e962a27c490a", + "type": "managed_draining", + "status": "CREATED", + "details": [ + { + "name": "capacityProviderName", + "value": "Infra-ECS-Cluster-ECS-project-update-cluster-d6bb6d5b-EC2CapacityProvider-3fIpdkLywwFt" + }, + { + "name": "autoScalingLifecycleHookName", + "value": "ecs-managed-draining-termination-hook" + } + ] + }, + { + "id": "45d0b36f-8cff-46b6-9380-1288744802ab", + "type": "sc", + "status": "DELETED", + "details": [] + }, + { + "id": "3e6890c3-609c-4832-91de-d6ca891b3ef1", + "type": "sc", + "status": "ATTACHED", + "details": [] + }, + { + "id": "961b8ec1-c2f1-4070-8495-e669b7668e90", + "type": "sc", + "status": "DELETED", + "details": [] + } + ], + "attachmentsStatus": "UPDATE_COMPLETE", + "serviceConnectDefaults": { + "namespace": "arn:aws:servicediscovery:us-west-2:123456789012:namespace/ns-dtjmxqpfi46ht7dr" + } + } + } + } + ] } }, "com.amazonaws.ecs#UpdateClusterRequest": { @@ -13574,7 +14616,41 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies the settings to use for a cluster."
+      "smithy.api#documentation": "Modifies the settings to use for a cluster.",
", + "smithy.api#examples": [ + { + "title": "To update a cluster's settings", + "documentation": "This example enables CloudWatch Container Insights for the default cluster.", + "input": { + "cluster": "default", + "settings": [ + { + "name": "containerInsights", + "value": "enabled" + } + ] + }, + "output": { + "cluster": { + "clusterArn": "arn:aws:ecs:us-west-2:123456789012:cluster/MyCluster", + "clusterName": "default", + "status": "ACTIVE", + "registeredContainerInstancesCount": 0, + "runningTasksCount": 0, + "pendingTasksCount": 0, + "activeServicesCount": 0, + "statistics": [], + "tags": [], + "settings": [ + { + "name": "containerInsights", + "value": "enabled" + } + ] + } + } + } + ] } }, "com.amazonaws.ecs#UpdateClusterSettingsRequest": { @@ -13645,7 +14721,28 @@ } ], "traits": { - "smithy.api#documentation": "
Updates the Amazon ECS container agent on a specified container instance. Updating the\n\t\t\tAmazon ECS container agent doesn't interrupt running tasks or services on the container\n\t\t\tinstance. The process for updating the agent differs depending on whether your container\n\t\t\tinstance was launched with the Amazon ECS-optimized AMI or another operating system.
The UpdateContainerAgent API isn't supported for container instances\n\t\t\t\tusing the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent,\n\t\t\t\tyou can update the ecs-init package. This updates the agent. For more\n\t\t\t\tinformation, see Updating the\n\t\t\t\t\tAmazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Agent updates with the UpdateContainerAgent API operation do not\n\t\t\t\tapply to Windows container instances. We recommend that you launch new container\n\t\t\t\tinstances to update the agent version in your Windows clusters.
The UpdateContainerAgent API requires an Amazon ECS-optimized AMI or Amazon\n\t\t\tLinux AMI with the ecs-init service installed and running. For help\n\t\t\tupdating the Amazon ECS container agent on other operating systems, see Manually updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "
Updates the Amazon ECS container agent on a specified container instance. Updating the\n\t\t\tAmazon ECS container agent doesn't interrupt running tasks or services on the container\n\t\t\tinstance. The process for updating the agent differs depending on whether your container\n\t\t\tinstance was launched with the Amazon ECS-optimized AMI or another operating system.
The UpdateContainerAgent API isn't supported for container instances\n\t\t\t\tusing the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent,\n\t\t\t\tyou can update the ecs-init package. This updates the agent. For more\n\t\t\t\tinformation, see Updating the\n\t\t\t\t\tAmazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Agent updates with the UpdateContainerAgent API operation do not\n\t\t\t\tapply to Windows container instances. We recommend that you launch new container\n\t\t\t\tinstances to update the agent version in your Windows clusters.
The UpdateContainerAgent API requires an Amazon ECS-optimized AMI or Amazon\n\t\t\tLinux AMI with the ecs-init service installed and running. For help\n\t\t\tupdating the Amazon ECS container agent on other operating systems, see Manually updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
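Because this operation only kicks off an agent update, a caller typically inspects agentUpdateStatus afterwards. A minimal Soto sketch, assuming the generated updateContainerAgent convenience method; the identifiers are placeholders from the example:

```swift
import SotoECS

// Sketch: request an agent update, then report the staged status.
func updateAgent(ecs: ECS) async throws {
    let response = try await ecs.updateContainerAgent(
        cluster: "MyCluster",                                    // placeholder
        containerInstance: "53ac7152-dcd1-4102-81f5-208962864132" // placeholder
    )
    // agentUpdateStatus moves through PENDING/STAGING/STAGED/UPDATING/UPDATED.
    print(response.containerInstance?.agentUpdateStatus ?? .failed)
}
```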
", + "smithy.api#examples": [ + { + "title": "To update the container agent version on a container instance", + "documentation": "This example updates the container agent version on the specified container instance in cluster MyCluster.", + "input": { + "cluster": "MyCluster", + "containerInstance": "53ac7152-dcd1-4102-81f5-208962864132" + }, + "output": { + "containerInstance": { + "agentConnected": true, + "agentUpdateStatus": "PENDING", + "versionInfo": { + "agentHash": "4023248", + "agentVersion": "1.0.0", + "dockerVersion": "DockerVersion: 1.5.0" + } + } + } + } + ] } }, "com.amazonaws.ecs#UpdateContainerAgentRequest": { @@ -13706,7 +14803,175 @@ } ], "traits": { - "smithy.api#documentation": "
Modifies the status of an Amazon ECS container instance.
Once a container instance has reached an ACTIVE state, you can change the\n\t\t\tstatus of a container instance to DRAINING to manually remove an instance\n\t\t\tfrom a cluster, for example to perform system updates, update the Docker daemon, or\n\t\t\tscale down the cluster size.
A container instance can't be changed to DRAINING until it has\n\t\t\t\treached an ACTIVE status. If the instance is in any other status, an\n\t\t\t\terror will be received.
When you set a container instance to DRAINING, Amazon ECS prevents new tasks\n\t\t\tfrom being scheduled for placement on the container instance and replacement service\n\t\t\ttasks are started on other container instances in the cluster if the resources are\n\t\t\tavailable. Service tasks on the container instance that are in the PENDING\n\t\t\tstate are stopped immediately.
Service tasks on the container instance that are in the RUNNING state are\n\t\t\tstopped and replaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent and maximumPercent. You can change\n\t\t\tthe deployment configuration of your service using UpdateService.
  • If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during task replacement. For example, if\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. If the\n\t\t\t\t\tminimum is 100%, the service scheduler can't remove existing tasks until the\n\t\t\t\t\treplacement tasks are considered healthy. Tasks for services that do not use a\n\t\t\t\t\tload balancer are considered healthy if they're in the RUNNING\n\t\t\t\t\tstate. Tasks for services that use a load balancer are considered healthy if\n\t\t\t\t\tthey're in the RUNNING state and are reported as healthy by the\n\t\t\t\t\tload balancer.
  • The maximumPercent parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during task replacement. You can use this to define the\n\t\t\t\t\treplacement batch size. For example, if desiredCount is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four tasks to be\n\t\t\t\t\tdrained, provided that the cluster resources required to do this are available.\n\t\t\t\t\tIf the maximum is 100%, then replacement tasks can't start until the draining\n\t\t\t\t\ttasks have stopped.
Any PENDING or RUNNING tasks that do not belong to a service\n\t\t\taren't affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING\n\t\t\ttasks. You can verify this using ListTasks.
When a container instance has been drained, you can set a container instance to\n\t\t\t\tACTIVE status and once it has reached that status the Amazon ECS scheduler\n\t\t\tcan begin scheduling tasks on the instance again.
" + "smithy.api#documentation": "
Modifies the status of an Amazon ECS container instance.
Once a container instance has reached an ACTIVE state, you can change the\n\t\t\tstatus of a container instance to DRAINING to manually remove an instance\n\t\t\tfrom a cluster, for example to perform system updates, update the Docker daemon, or\n\t\t\tscale down the cluster size.
A container instance can't be changed to DRAINING until it has\n\t\t\t\treached an ACTIVE status. If the instance is in any other status, an\n\t\t\t\terror will be received.
When you set a container instance to DRAINING, Amazon ECS prevents new tasks\n\t\t\tfrom being scheduled for placement on the container instance and replacement service\n\t\t\ttasks are started on other container instances in the cluster if the resources are\n\t\t\tavailable. Service tasks on the container instance that are in the PENDING\n\t\t\tstate are stopped immediately.
Service tasks on the container instance that are in the RUNNING state are\n\t\t\tstopped and replaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent and maximumPercent. You can change\n\t\t\tthe deployment configuration of your service using UpdateService.
  • If minimumHealthyPercent is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount temporarily during task replacement. For example, if\n\t\t\t\t\t\tdesiredCount is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. If the\n\t\t\t\t\tminimum is 100%, the service scheduler can't remove existing tasks until the\n\t\t\t\t\treplacement tasks are considered healthy. Tasks for services that do not use a\n\t\t\t\t\tload balancer are considered healthy if they're in the RUNNING\n\t\t\t\t\tstate. Tasks for services that use a load balancer are considered healthy if\n\t\t\t\t\tthey're in the RUNNING state and are reported as healthy by the\n\t\t\t\t\tload balancer.
  • The maximumPercent parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during task replacement. You can use this to define the\n\t\t\t\t\treplacement batch size. For example, if desiredCount is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four tasks to be\n\t\t\t\t\tdrained, provided that the cluster resources required to do this are available.\n\t\t\t\t\tIf the maximum is 100%, then replacement tasks can't start until the draining\n\t\t\t\t\ttasks have stopped.
Any PENDING or RUNNING tasks that do not belong to a service\n\t\t\taren't affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING\n\t\t\ttasks. You can verify this using ListTasks.
When a container instance has been drained, you can set a container instance to\n\t\t\t\tACTIVE status and once it has reached that status the Amazon ECS scheduler\n\t\t\tcan begin scheduling tasks on the instance again.
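The drain-then-verify workflow described above (set DRAINING, then poll ListTasks until no RUNNING tasks remain) could look like the following Soto sketch; the method and member names assume the generated Swift client, and the polling interval is arbitrary:

```swift
import SotoECS

// Sketch: drain an instance, then wait until it has no more tasks.
func drainInstance(ecs: ECS, cluster: String, instance: String) async throws {
    _ = try await ecs.updateContainerInstancesState(
        cluster: cluster,
        containerInstances: [instance],
        status: .draining
    )
    while true {
        let tasks = try await ecs.listTasks(cluster: cluster, containerInstance: instance)
        if (tasks.taskArns ?? []).isEmpty { break }  // drained
        try await Task.sleep(for: .seconds(15))      // tasks still being replaced
    }
}
```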
", + "smithy.api#examples": [ + { + "title": "To update the state of a container instance", + "documentation": "This example updates the state of the specified container instance in the default cluster to DRAINING. ", + "input": { + "cluster": "default", + "containerInstances": [ + "1c3be8ed-df30-47b4-8f1e-6e68ebd01f34" + ], + "status": "DRAINING" + }, + "output": { + "containerInstances": [ + { + "agentConnected": true, + "attributes": [ + { + "name": "ecs.availability-zone", + "value": "us-west-2b" + }, + { + "name": "com.amazonaws.ecs.capability.logging-driver.syslog" + }, + { + "name": "ecs.instance-type", + "value": "c4.xlarge" + }, + { + "name": "ecs.ami-id", + "value": "ami-a2ca61c2" + }, + { + "name": "com.amazonaws.ecs.capability.task-iam-role-network-host" + }, + { + "name": "com.amazonaws.ecs.capability.logging-driver.awslogs" + }, + { + "name": "com.amazonaws.ecs.capability.logging-driver.json-file" + }, + { + "name": "com.amazonaws.ecs.capability.docker-remote-api.1.17" + }, + { + "name": "com.amazonaws.ecs.capability.privileged-container" + }, + { + "name": "com.amazonaws.ecs.capability.docker-remote-api.1.18" + }, + { + "name": "com.amazonaws.ecs.capability.docker-remote-api.1.19" + }, + { + "name": "com.amazonaws.ecs.capability.ecr-auth" + }, + { + "name": "ecs.os-type", + "value": "linux" + }, + { + "name": "com.amazonaws.ecs.capability.docker-remote-api.1.20" + }, + { + "name": "com.amazonaws.ecs.capability.docker-remote-api.1.21" + }, + { + "name": "com.amazonaws.ecs.capability.docker-remote-api.1.22" + }, + { + "name": "com.amazonaws.ecs.capability.task-iam-role" + }, + { + "name": "com.amazonaws.ecs.capability.docker-remote-api.1.23" + } + ], + "containerInstanceArn": "arn:aws:ecs:us-west-2:012345678910:container-instance/default/1c3be8ed-df30-47b4-8f1e-6e68ebd01f34", + "ec2InstanceId": "i-05d99c76955727ec6", + "pendingTasksCount": 0, + "registeredResources": [ + { + "doubleValue": 0, + "integerValue": 4096, + "longValue": 0, + "name": "CPU", + "type": "INTEGER" + }, + { + "doubleValue": 0, + "integerValue": 7482, + "longValue": 0, + "name": "MEMORY", + "type": "INTEGER" + }, + { + "doubleValue": 0, + "integerValue": 0, + "longValue": 0, + "name": "PORTS", + "stringSetValue": [ + "22", + "2376", + "2375", + "51678", + "51679" + ], + "type": "STRINGSET" + }, + { + "doubleValue": 0, + "integerValue": 0, + "longValue": 0, + "name": "PORTS_UDP", + "stringSetValue": [], + "type": "STRINGSET" + } + ], + "remainingResources": [ + { + "doubleValue": 0, + "integerValue": 4096, + "longValue": 0, + "name": "CPU", + "type": "INTEGER" + }, + { + "doubleValue": 0, + "integerValue": 7482, + "longValue": 0, + "name": "MEMORY", + "type": "INTEGER" + }, + { + "doubleValue": 0, + "integerValue": 0, + "longValue": 0, + "name": "PORTS", + "stringSetValue": [ + "22", + "2376", + "2375", + "51678", + "51679" + ], + "type": "STRINGSET" + }, + { + "doubleValue": 0, + "integerValue": 0, + "longValue": 0, + "name": "PORTS_UDP", + "stringSetValue": [], + "type": "STRINGSET" + } + ], + "runningTasksCount": 0, + "status": "DRAINING", + "version": 30, + "versionInfo": { + "agentHash": "efe53c6", + "agentVersion": "1.13.1", + "dockerVersion": "DockerVersion: 1.11.2" + } + } + ], + "failures": [] + } + } + ] } }, "com.amazonaws.ecs#UpdateContainerInstancesStateRequest": { @@ -13877,7 +15142,51 @@ } ], "traits": { - "smithy.api#documentation": "
Modifies which task set in a service is the primary task set. Any parameters that are\n\t\t\tupdated on the primary task set in a service will transition to the service. This is\n\t\t\tused when a service uses the EXTERNAL deployment controller type. For more\n\t\t\tinformation, see Amazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "
Modifies which task set in a service is the primary task set. Any parameters that are\n\t\t\tupdated on the primary task set in a service will transition to the service. This is\n\t\t\tused when a service uses the EXTERNAL deployment controller type. For more\n\t\t\tinformation, see Amazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.
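A minimal Soto sketch of promoting a task set under the EXTERNAL deployment controller; the ARN and names are placeholders from the example, and the convenience signature is assumed from the generated client:

```swift
import SotoECS

// Sketch: make a task set PRIMARY so its parameters transition to the service.
func promoteTaskSet(ecs: ECS) async throws {
    let response = try await ecs.updateServicePrimaryTaskSet(
        cluster: "MyCluster",   // placeholder
        primaryTaskSet: "arn:aws:ecs:us-west-2:123456789012:task-set/MyCluster/MyService/ecs-svc/1234567890123456789",
        service: "MyService"    // placeholder
    )
    print(response.taskSet?.status ?? "unknown")  // expect "PRIMARY"
}
```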
", + "smithy.api#examples": [ + { + "title": "To update the primary task set for a service", + "documentation": "This example updates the primary task set for a service MyService that uses the EXTERNAL deployment controller type. ", + "input": { + "cluster": "MyCluster", + "service": "MyService", + "primaryTaskSet": "arn:aws:ecs:us-west-2:123456789012:task-set/MyCluster/MyService/ecs-svc/1234567890123456789" + }, + "output": { + "taskSet": { + "id": "ecs-svc/1234567890123456789", + "taskSetArn": "arn:aws:ecs:us-west-2:123456789012:task-set/MyCluster/MyService/ecs-svc/1234567890123456789", + "status": "PRIMARY", + "taskDefinition": "arn:aws:ecs:us-west-2:123456789012:task-definition/sample-fargate:2", + "computedDesiredCount": 1, + "pendingCount": 0, + "runningCount": 0, + "createdAt": 1.557128360711E9, + "updatedAt": 1.557129412653E9, + "launchType": "EC2", + "networkConfiguration": { + "awsvpcConfiguration": { + "subnets": [ + "subnet-12344321" + ], + "securityGroups": [ + "sg-12344312" + ], + "assignPublicIp": "DISABLED" + } + }, + "loadBalancers": [], + "serviceRegistries": [], + "scale": { + "value": 50, + "unit": "PERCENT" + }, + "stabilityStatus": "STABILIZING", + "stabilityStatusAt": 1.557129279914E9 + } + } + } + ] } }, "com.amazonaws.ecs#UpdateServicePrimaryTaskSetRequest": { @@ -14267,7 +15576,55 @@ } ], "traits": { - "smithy.api#documentation": "
Modifies a task set. This is used when a service uses the EXTERNAL\n\t\t\tdeployment controller type. For more information, see Amazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "
Modifies a task set. This is used when a service uses the EXTERNAL\n\t\t\tdeployment controller type. For more information, see Amazon ECS Deployment\n\t\t\t\tTypes in the Amazon Elastic Container Service Developer Guide.
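Adjusting a task set's scale is the building block of an externally controlled canary rollout. A minimal Soto sketch, assuming the generated updateTaskSet convenience method; the 50 PERCENT value mirrors the example:

```swift
import SotoECS

// Sketch: shift 50% of the service's desired count onto one task set.
func scaleTaskSet(ecs: ECS, taskSetArn: String) async throws {
    let response = try await ecs.updateTaskSet(
        cluster: "MyCluster",                    // placeholder
        scale: .init(unit: .percent, value: 50),
        service: "MyService",                    // placeholder
        taskSet: taskSetArn
    )
    print(response.taskSet?.stabilityStatus ?? .stabilizing)
}
```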
", + "smithy.api#examples": [ + { + "title": "To update a task set", + "documentation": "This example updates the task set to adjust the scale.", + "input": { + "cluster": "MyCluster", + "service": "MyService", + "taskSet": "arn:aws:ecs:us-west-2:123456789012:task-set/MyCluster/MyService/ecs-svc/1234567890123456789", + "scale": { + "value": 50, + "unit": "PERCENT" + } + }, + "output": { + "taskSet": { + "id": "ecs-svc/1234567890123456789", + "taskSetArn": "arn:aws:ecs:us-west-2:123456789012:task-set/MyCluster/MyService/ecs-svc/1234567890123456789", + "status": "ACTIVE", + "taskDefinition": "arn:aws:ecs:us-west-2:123456789012:task-definition/sample-fargate:2", + "computedDesiredCount": 0, + "pendingCount": 0, + "runningCount": 0, + "createdAt": 1.557128360711E9, + "updatedAt": 1.557129279914E9, + "launchType": "EC2", + "networkConfiguration": { + "awsvpcConfiguration": { + "subnets": [ + "subnet-12344321" + ], + "securityGroups": [ + "sg-12344321" + ], + "assignPublicIp": "DISABLED" + } + }, + "loadBalancers": [], + "serviceRegistries": [], + "scale": { + "value": 50, + "unit": "PERCENT" + }, + "stabilityStatus": "STABILIZING", + "stabilityStatusAt": 1.557129279914E9 + } + } + } + ] } }, "com.amazonaws.ecs#UpdateTaskSetRequest": { diff --git a/models/eks.json b/models/eks.json index d4cc65bd5e..5d9bfb9b36 100644 --- a/models/eks.json +++ b/models/eks.json @@ -203,6 +203,9 @@ { "target": "com.amazonaws.eks#DescribeCluster" }, + { + "target": "com.amazonaws.eks#DescribeClusterVersions" + }, { "target": "com.amazonaws.eks#DescribeEksAnywhereSubscription" }, @@ -315,7 +318,7 @@ "name": "eks" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "
Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy\n for you to run Kubernetes on Amazon Web Services without needing to set up or maintain your own\n Kubernetes control plane. Kubernetes is an open-source system for automating the deployment,\n scaling, and management of containerized applications.
Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you\n can use all the existing plugins and tooling from the Kubernetes community. Applications\n running on Amazon EKS are fully compatible with applications running on any\n standard Kubernetes environment, whether running in on-premises data centers or public\n clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required.
", + "smithy.api#documentation": "
Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy\n for you to run Kubernetes on Amazon Web Services without needing to set up or maintain your own\n Kubernetes control plane. Kubernetes is an open-source system for automating the deployment,\n scaling, and management of containerized applications.
Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you\n can use all the existing plugins and tooling from the Kubernetes community. Applications\n running on Amazon EKS are fully compatible with applications running on any\n standard Kubernetes environment, whether running in on-premises data centers or public\n clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS\n without any code modification required.
", "smithy.api#title": "Amazon Elastic Kubernetes Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1396,7 +1399,7 @@ "principalArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "
The ARN of the IAM principal for the access entry. If you ever delete\n the IAM principal with this ARN, the access entry isn't automatically\n deleted. We recommend that you delete the access entry with an ARN for an IAM principal that you delete. If you don't delete the access entry and ever\n recreate the IAM principal, even if it has the same ARN, the access\n entry won't work. This is because even though the ARN is the same for the recreated\n IAM principal, the roleID or userID (you\n can see this with the Security Token Service\n GetCallerIdentity API) is different for the recreated IAM\n principal than it was for the original IAM principal. Even though you\n don't see the IAM principal's roleID or userID\n for an access entry, Amazon EKS stores it with the access entry.
" + "smithy.api#documentation": "
The ARN of the IAM principal for the access entry. If you ever delete\n the IAM principal with this ARN, the access entry isn't automatically\n deleted. We recommend that you delete the access entry with an ARN for an IAM\n principal that you delete. If you don't delete the access entry and ever\n recreate the IAM principal, even if it has the same ARN, the access\n entry won't work. This is because even though the ARN is the same for the recreated\n IAM principal, the roleID or userID (you\n can see this with the Security Token Service\n GetCallerIdentity API) is different for the recreated IAM\n principal than it was for the original IAM principal. Even though you\n don't see the IAM principal's roleID or userID\n for an access entry, Amazon EKS stores it with the access entry.
" } }, "kubernetesGroups": { @@ -1608,7 +1611,7 @@ "podIdentityAssociations": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "
An array of Pod Identity Associations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.
" + "smithy.api#documentation": "
An array of Pod Identity Associations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.
" } } }, @@ -1616,6 +1619,32 @@ "smithy.api#documentation": "
An Amazon EKS add-on. For more information, see Amazon EKS add-ons in\n the Amazon EKS User Guide.
" } }, + "com.amazonaws.eks#AddonCompatibilityDetail": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "
The name of the Amazon EKS add-on.
" + } + }, + "compatibleVersions": { + "target": "com.amazonaws.eks#StringList", + "traits": { + "smithy.api#documentation": "
The list of compatible Amazon EKS add-on versions for the next Kubernetes version.
" + } + } + }, + "traits": { + "smithy.api#documentation": "
The summary information about the Amazon EKS add-on compatibility for the next Kubernetes \n version for an insight check in the UPGRADE_READINESS category.
" + } + }, + "com.amazonaws.eks#AddonCompatibilityDetails": { + "type": "list", + "member": { + "target": "com.amazonaws.eks#AddonCompatibilityDetail" + } + }, "com.amazonaws.eks#AddonHealth": { "type": "structure", "members": { @@ -1790,7 +1819,7 @@ } }, "traits": { - "smithy.api#documentation": "
A type of Pod Identity Association owned by an Amazon EKS Add-on.
Each EKS Pod Identity Association maps a role to a service account in a namespace in the cluster.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.
" + "smithy.api#documentation": "
A type of Pod Identity Association owned by an Amazon EKS Add-on.
Each EKS Pod Identity Association maps a role to a service account in a namespace in the cluster.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.
" } }, "com.amazonaws.eks#AddonPodIdentityAssociationsList": { @@ -2307,7 +2336,7 @@ } }, "traits": { - "smithy.api#documentation": "
Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the EKS User Guide.
" + "smithy.api#documentation": "
Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the Amazon EKS User Guide.
" } }, "com.amazonaws.eks#Boolean": { @@ -2577,7 +2606,7 @@ "outpostConfig": { "target": "com.amazonaws.eks#OutpostConfigResponse", "traits": { - "smithy.api#documentation": "
An object representing the configuration of your local Amazon EKS cluster on\n an Amazon Web Services Outpost. This object isn't available for clusters on the Amazon Web Services cloud.
" + "smithy.api#documentation": "
An object representing the configuration of your local Amazon EKS cluster on\n an Amazon Web Services Outpost. This object isn't available for clusters on the Amazon Web Services\n cloud.
" } }, "accessConfig": { @@ -2589,7 +2618,7 @@ "upgradePolicy": { "target": "com.amazonaws.eks#UpgradePolicyResponse", "traits": { - "smithy.api#documentation": "
This value indicates if extended support is enabled or disabled for the cluster.
Learn more about EKS Extended Support in the EKS User Guide.
" + "smithy.api#documentation": "
This value indicates if extended support is enabled or disabled for the cluster.
Learn more about EKS Extended Support in the Amazon EKS User Guide.
" } }, "zonalShiftConfig": { @@ -2607,13 +2636,13 @@ "computeConfig": { "target": "com.amazonaws.eks#ComputeConfigResponse", "traits": { - "smithy.api#documentation": "
Indicates the current configuration of the compute capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account. For more information, see EKS Auto Mode compute capability in the EKS User Guide.
" + "smithy.api#documentation": "
Indicates the current configuration of the compute capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the compute capability is enabled, EKS Auto Mode will create and delete EC2 Managed Instances in your Amazon Web Services account. For more information, see EKS Auto Mode compute capability in the Amazon EKS User Guide.
" } }, "storageConfig": { "target": "com.amazonaws.eks#StorageConfigResponse", "traits": { - "smithy.api#documentation": "
Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the EKS User Guide.
" + "smithy.api#documentation": "
Indicates the current configuration of the block storage capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. If the block storage capability is enabled, EKS Auto Mode will create and delete EBS volumes in your Amazon Web Services account. For more information, see EKS Auto Mode block storage capability in the Amazon EKS User Guide.
" } } }, @@ -2837,6 +2866,98 @@ } } }, + "com.amazonaws.eks#ClusterVersionInformation": { + "type": "structure", + "members": { + "clusterVersion": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "
The Kubernetes version for the cluster.
" + } + }, + "clusterType": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "
The type of cluster this version is for.
" + } + }, + "defaultPlatformVersion": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "
Default platform version for this Kubernetes version.
" + } + }, + "defaultVersion": { + "target": "com.amazonaws.eks#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "
Indicates if this is a default version.
" + } + }, + "releaseDate": { + "target": "com.amazonaws.eks#Timestamp", + "traits": { + "smithy.api#documentation": "
The release date of this cluster version.
" + } + }, + "endOfStandardSupportDate": { + "target": "com.amazonaws.eks#Timestamp", + "traits": { + "smithy.api#documentation": "
Date when standard support ends for this version.
" + } + }, + "endOfExtendedSupportDate": { + "target": "com.amazonaws.eks#Timestamp", + "traits": { + "smithy.api#documentation": "
Date when extended support ends for this version.
" + } + }, + "status": { + "target": "com.amazonaws.eks#ClusterVersionStatus", + "traits": { + "smithy.api#documentation": "
Current status of this cluster version.
" + } + }, + "kubernetesPatchVersion": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "
The patch version of Kubernetes for this cluster version.
" + } + } + }, + "traits": { + "smithy.api#documentation": "
Contains details about a specific EKS cluster version.
" + } + }, + "com.amazonaws.eks#ClusterVersionList": { + "type": "list", + "member": { + "target": "com.amazonaws.eks#ClusterVersionInformation" + } + }, + "com.amazonaws.eks#ClusterVersionStatus": { + "type": "enum", + "members": { + "unsupported": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "unsupported" + } + }, + "standard_support": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "standard-support" + } + }, + "extended_support": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "extended-support" + } + } + } + }, "com.amazonaws.eks#Compatibilities": { "type": "list", "member": { @@ -2882,18 +3003,18 @@ "nodePools": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "
Configuration for node pools that defines the compute resources for your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the EKS User Guide.
" + "smithy.api#documentation": "
Configuration for node pools that defines the compute resources for your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the Amazon EKS User Guide.
" } }, "nodeRoleArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "
The ARN of the IAM Role EKS will assign to EC2 Managed Instances in your EKS Auto Mode cluster. This value cannot be changed after the compute capability of EKS Auto Mode is enabled. For more information, see the IAM Reference in the EKS User Guide.
" + "smithy.api#documentation": "
The ARN of the IAM Role EKS will assign to EC2 Managed Instances in your EKS Auto Mode cluster. This value cannot be changed after the compute capability of EKS Auto Mode is enabled. For more information, see the IAM Reference in the Amazon EKS User Guide.
" } } }, "traits": { - "smithy.api#documentation": "
Request to update the configuration of the compute capability of your EKS Auto Mode cluster. For example, enable the capability. For more information, see EKS Auto Mode compute capability in the EKS User Guide.
" + "smithy.api#documentation": "
Request to update the configuration of the compute capability of your EKS Auto Mode cluster. For example, enable the capability. For more information, see EKS Auto Mode compute capability in the Amazon EKS User Guide.
" } }, "com.amazonaws.eks#ComputeConfigResponse": { @@ -2908,7 +3029,7 @@ "nodePools": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "
Indicates the current configuration of node pools in your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the EKS User Guide.
" + "smithy.api#documentation": "
Indicates the current configuration of node pools in your EKS Auto Mode cluster. For more information, see EKS Auto Mode Node Pools in the Amazon EKS User Guide.
" } }, "nodeRoleArn": { @@ -3052,7 +3173,7 @@ } }, "traits": { - "smithy.api#documentation": "
The placement configuration for all the control plane instances of your local Amazon EKS cluster on an Amazon Web Services Outpost. For more information, see\n Capacity\n considerations in the Amazon EKS User Guide.
" + "smithy.api#documentation": "
The placement configuration for all the control plane instances of your local Amazon EKS\n cluster on an Amazon Web Services Outpost. For more information, see\n Capacity\n considerations in the Amazon EKS User Guide.
" } }, "com.amazonaws.eks#ControlPlanePlacementResponse": { @@ -3066,7 +3187,7 @@ } }, "traits": { - "smithy.api#documentation": "
The placement configuration for all the control plane instances of your local Amazon EKS cluster on an Amazon Web Services Outpost. For more information, see\n Capacity considerations in the Amazon EKS User Guide.
" + "smithy.api#documentation": "
The placement configuration for all the control plane instances of your local Amazon EKS\n cluster on an Amazon Web Services Outpost. For more information, see\n Capacity considerations in the Amazon EKS User Guide.
" } }, "com.amazonaws.eks#CreateAccessConfigRequest": { @@ -3140,7 +3261,7 @@ "principalArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "
The ARN of the IAM principal for the AccessEntry. You can specify one ARN for each access entry. You can't specify the\n same ARN in more than one access entry. This value can't be changed after access entry\n creation.
The valid principals differ depending on the type of the access entry in the\n type field. The only valid ARN is IAM roles for the types of access\n entries for nodes: \n . You can use every IAM principal type for STANDARD access entries.\n You can't use the STS session principal type with access entries because this is a\n temporary principal for each session and not a permanent identity that can be assigned\n permissions.
IAM best practices recommend using IAM roles with\n temporary credentials, rather than IAM users with long-term credentials.
", + "smithy.api#documentation": "
The ARN of the IAM principal for the AccessEntry. You can specify one ARN for each access entry. You can't specify the\n same ARN in more than one access entry. This value can't be changed after access entry\n creation.
The valid principals differ depending on the type of the access entry in the\n type field. For STANDARD access entries, you can use every\n IAM principal type. For nodes (EC2 (for EKS Auto Mode),\n EC2_LINUX, EC2_WINDOWS, FARGATE_LINUX, and\n HYBRID_LINUX), the only valid ARN is IAM roles.\n You can't use the STS session principal type with access entries because this is a\n temporary principal for each session and not a permanent identity that can be assigned\n permissions.
IAM best practices recommend using IAM roles with\n temporary credentials, rather than IAM users with long-term credentials.
", "smithy.api#required": {} } }, @@ -3172,7 +3293,7 @@ "type": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "
The type of the new access entry. Valid values are Standard,\n FARGATE_LINUX, EC2_LINUX, and\n EC2_WINDOWS.
If the principalArn is for an IAM role that's used for\n self-managed Amazon EC2 nodes, specify EC2_LINUX or\n EC2_WINDOWS. Amazon EKS grants the necessary permissions to the\n node for you. If the principalArn is for any other purpose, specify\n STANDARD. If you don't specify a value, Amazon EKS sets the\n value to STANDARD. It's unnecessary to create access entries for IAM roles used with Fargate profiles or managed Amazon EC2 nodes, because Amazon EKS creates entries in the\n aws-auth\n ConfigMap for the roles. You can't change this value once you've created\n the access entry.
If you set the value to EC2_LINUX or EC2_WINDOWS, you can't\n specify values for kubernetesGroups, or associate an\n AccessPolicy to the access entry.
" + "smithy.api#documentation": "
The type of the new access entry. Valid values are STANDARD,\n FARGATE_LINUX, EC2_LINUX, EC2_WINDOWS,\n EC2 (for EKS Auto Mode), HYBRID_LINUX, and HYPERPOD_LINUX.
If the principalArn is for an IAM role that's used for self-managed\n Amazon EC2 nodes, specify EC2_LINUX or EC2_WINDOWS. Amazon EKS grants\n the necessary permissions to the node for you. If the principalArn is for\n any other purpose, specify STANDARD. If you don't specify a value, Amazon EKS\n sets the value to STANDARD. If you have the access mode of the cluster set\n to API_AND_CONFIG_MAP, it's unnecessary to create access entries for IAM\n roles used with Fargate profiles or managed Amazon EC2 nodes, because Amazon EKS creates entries\n in the aws-auth\n ConfigMap for the roles. You can't change this value once you've created\n the access entry.
If you set the value to EC2_LINUX or EC2_WINDOWS, you can't\n specify values for kubernetesGroups, or associate an\n AccessPolicy to the access entry.
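A minimal Soto sketch of creating a node access entry of type EC2_LINUX, per the documentation above; the cluster name and role ARN are placeholders, and the convenience signature is assumed from the generated EKS client:

```swift
import SotoEKS

// Sketch: grant a self-managed node role access to the cluster.
func addNodeAccessEntry(eks: EKS) async throws {
    let entry = try await eks.createAccessEntry(
        clusterName: "my-cluster",                               // placeholder
        principalArn: "arn:aws:iam::123456789012:role/eksNodeRole", // placeholder
        type: "EC2_LINUX"  // EKS grants the node permissions itself
    )
    print(entry.accessEntry?.accessEntryArn ?? "pending")
}
```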
" } } }, @@ -3220,7 +3341,7 @@ } ], "traits": { - "smithy.api#documentation": "
Creates an Amazon EKS add-on.
Amazon EKS add-ons help to automate the provisioning and lifecycle management\n of common operational software for Amazon EKS clusters. For more information,\n see Amazon EKS add-ons in the Amazon EKS User Guide.
", + "smithy.api#documentation": "
Creates an Amazon EKS add-on.
Amazon EKS add-ons help to automate the provisioning and lifecycle management\n of common operational software for Amazon EKS clusters. For more information,\n see Amazon EKS\n add-ons in the Amazon EKS User Guide.
", "smithy.api#http": { "method": "POST", "uri": "/clusters/{clusterName}/addons", @@ -3286,7 +3407,7 @@ "podIdentityAssociations": { "target": "com.amazonaws.eks#AddonPodIdentityAssociationsList", "traits": { - "smithy.api#documentation": "
An array of Pod Identity Associations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.
" + "smithy.api#documentation": "
An array of Pod Identity Associations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role.
For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.
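A minimal Soto sketch of creating an add-on with a Pod Identity association, as this member describes; the add-on name, role ARN, and service account are placeholders, and the roleArn/serviceAccount member names of AddonPodIdentityAssociations are assumed from this model:

```swift
import SotoEKS

// Sketch: install an add-on and bind its service account to an IAM role.
func installAddon(eks: EKS) async throws {
    let response = try await eks.createAddon(
        addonName: "aws-ebs-csi-driver",  // placeholder
        clusterName: "my-cluster",        // placeholder
        podIdentityAssociations: [
            .init(roleArn: "arn:aws:iam::123456789012:role/ebsCsiDriverRole", // placeholder
                  serviceAccount: "ebs-csi-controller-sa")                    // placeholder
        ]
    )
    print(response.addon?.status ?? .creating)
}
```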
" } } }, @@ -3337,7 +3458,7 @@ } ], "traits": { - "smithy.api#documentation": "
Creates an Amazon EKS control plane.
The Amazon EKS control plane consists of control plane instances that run the\n Kubernetes software, such as etcd and the API server. The control plane runs in\n an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is\n single tenant and unique. It runs on its own set of Amazon EC2 instances.
The cluster control plane is provisioned across multiple Availability Zones and\n fronted by an Elastic Load Balancing\n Network Load Balancer. Amazon EKS also provisions elastic network interfaces in\n your VPC subnets to provide connectivity from the control plane instances to the nodes\n (for example, to support kubectl exec, logs, and\n proxy data flows).
Amazon EKS nodes run in your Amazon Web Services account and connect to your\n cluster's control plane over the Kubernetes API server endpoint and a certificate file that\n is created for your cluster.
You can use the endpointPublicAccess and\n endpointPrivateAccess parameters to enable or disable public and\n private access to your cluster's Kubernetes API server endpoint. By default, public access is\n enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the\n \n Amazon EKS User Guide\n .
You can use the logging parameter to enable or disable exporting the\n Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster\n control plane logs aren't exported to CloudWatch Logs. For more information, see\n Amazon EKS Cluster Control Plane Logs in the\n \n Amazon EKS User Guide\n .
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.
In most cases, it takes several minutes to create a cluster. After you create an\n Amazon EKS cluster, you must configure your Kubernetes tooling to communicate\n with the API server and launch nodes into your cluster. For more information, see Allowing users to\n access your cluster and Launching\n Amazon EKS nodes in the Amazon EKS User Guide.
", + "smithy.api#documentation": "
Creates an Amazon EKS control plane.
The Amazon EKS control plane consists of control plane instances that run the\n Kubernetes software, such as etcd and the API server. The control plane runs in\n an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS\n API server endpoint. Each Amazon EKS cluster control plane is\n single tenant and unique. It runs on its own set of Amazon EC2 instances.
The cluster control plane is provisioned across multiple Availability Zones and\n fronted by an Elastic Load Balancing\n Network Load Balancer. Amazon EKS also provisions elastic network interfaces in\n your VPC subnets to provide connectivity from the control plane instances to the nodes\n (for example, to support kubectl exec, logs, and\n proxy data flows).
Amazon EKS nodes run in your Amazon Web Services account and connect to your\n cluster's control plane over the Kubernetes API server endpoint and a certificate file that\n is created for your cluster.
You can use the endpointPublicAccess and\n endpointPrivateAccess parameters to enable or disable public and\n private access to your cluster's Kubernetes API server endpoint. By default, public access is\n enabled, and private access is disabled. For more information, see Amazon EKS\n Cluster Endpoint Access Control in the\n \n Amazon EKS User Guide\n .
You can use the logging parameter to enable or disable exporting the\n Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster\n control plane logs aren't exported to CloudWatch Logs. For more information, see\n Amazon EKS Cluster Control Plane Logs in the\n \n Amazon EKS User Guide\n .
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.
In most cases, it takes several minutes to create a cluster. After you create an\n Amazon EKS cluster, you must configure your Kubernetes tooling to communicate\n with the API server and launch nodes into your cluster. For more information, see Allowing users to\n access your cluster and Launching\n Amazon EKS nodes in the Amazon EKS User Guide.
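A minimal Soto sketch of the CreateCluster call mirroring the JSON example that follows; the subnet, security group, and role identifiers are placeholders, and the convenience signature is assumed from the generated client:

```swift
import SotoEKS

// Sketch: create a cluster and report its initial status.
func createCluster(eks: EKS) async throws {
    let response = try await eks.createCluster(
        name: "prod",  // placeholder
        resourcesVpcConfig: .init(
            securityGroupIds: ["sg-6979fe18"],                    // placeholder
            subnetIds: ["subnet-6782e71e", "subnet-e7e761ac"]     // placeholder
        ),
        roleArn: "arn:aws:iam::012345678910:role/eks-service-role" // placeholder
    )
    print(response.cluster?.status ?? .creating)  // CREATING until ready
}
```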
", "smithy.api#examples": [ { "title": "To create a new cluster", @@ -3386,7 +3507,7 @@ "roleArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "
The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes\n control plane to make calls to Amazon Web Services API operations on your behalf. For\n more information, see Amazon EKS Service IAM Role in the \n Amazon EKS User Guide\n .
", + "smithy.api#documentation": "
The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes\n control plane to make calls to Amazon Web Services API operations on your behalf. For\n more information, see Amazon EKS Service IAM\n Role in the \n Amazon EKS User Guide\n .
", "smithy.api#required": {} } }, @@ -3406,7 +3527,7 @@ "logging": { "target": "com.amazonaws.eks#Logging", "traits": { - "smithy.api#documentation": "
Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the\n \n Amazon EKS User Guide\n .
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.
" + "smithy.api#documentation": "
Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs\n . By default, cluster control plane logs aren't exported to CloudWatch Logs\n . For more information, see Amazon EKS\n Cluster control plane logs in the\n \n Amazon EKS User Guide\n .
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.
" } }, "clientRequestToken": { @@ -3455,7 +3576,7 @@ "zonalShiftConfig": { "target": "com.amazonaws.eks#ZonalShiftConfigRequest", "traits": { - "smithy.api#documentation": "
Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services\n configures zonal autoshift for the cluster.
Zonal shift is a feature of\n Amazon Application Recovery Controller (ARC). ARC zonal shift is designed to be a temporary measure that allows you to move\n traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel\n it. You can extend the zonal shift if necessary.
You can start a zonal shift for an EKS cluster, or you can allow Amazon Web Services to do it for you\n by enabling zonal autoshift. This shift updates the flow of\n east-to-west network traffic in your cluster to only consider network endpoints for Pods\n running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress\n traffic for applications in your EKS cluster will automatically route traffic to targets in\n the healthy AZs. For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC)\n Zonal Shift in Amazon EKS in the\n \n Amazon EKS User Guide\n .
" + "smithy.api#documentation": "
Enable or disable ARC zonal shift for the cluster. If zonal shift is enabled, Amazon Web Services\n configures zonal autoshift for the cluster.
Zonal shift is a feature of\n Amazon Application Recovery Controller (ARC). ARC zonal shift is designed to be a temporary measure that allows you to move\n traffic for a resource away from an impaired AZ until the zonal shift expires or you cancel\n it. You can extend the zonal shift if necessary.
You can start a zonal shift for an Amazon EKS cluster, or you can allow Amazon Web Services to do it for you\n by enabling zonal autoshift. This shift updates the flow of\n east-to-west network traffic in your cluster to only consider network endpoints for Pods\n running on worker nodes in healthy AZs. Additionally, any ALB or NLB handling ingress\n traffic for applications in your Amazon EKS cluster will automatically route traffic to targets in\n the healthy AZs. For more information about zonal shift in EKS, see Learn about Amazon Application Recovery Controller (ARC)\n Zonal Shift in Amazon EKS in the\n \n Amazon EKS User Guide\n .
" } }, "remoteNetworkConfig": { @@ -3627,7 +3748,7 @@ } ], "traits": { - "smithy.api#documentation": "
Creates a Fargate profile for your Amazon EKS cluster. You\n must have at least one Fargate profile in a cluster to be able to run\n pods on Fargate.
The Fargate profile allows an administrator to declare which pods run\n on Fargate and specify which pods run on which Fargate\n profile. This declaration is done through the profile's selectors. Each profile can have\n up to five selectors that contain a namespace and labels. A namespace is required for\n every selector. The label field consists of multiple optional key-value pairs. Pods that\n match the selectors are scheduled on Fargate. If a to-be-scheduled pod\n matches any of the selectors in the Fargate profile, then that pod is run\n on Fargate.
When you create a Fargate profile, you must specify a pod execution\n role to use with the pods that are scheduled with the profile. This role is added to the\n cluster's Kubernetes Role Based\n Access Control (RBAC) for authorization so that the kubelet\n that is running on the Fargate infrastructure can register with your\n Amazon EKS cluster so that it can appear in your cluster as a node. The pod\n execution role also provides IAM permissions to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For\n more information, see Pod Execution Role in the Amazon EKS User Guide.
Fargate profiles are immutable. However, you can create a new updated\n profile to replace an existing profile and then delete the original after the updated\n profile has finished creating.
If any Fargate profiles in a cluster are in the DELETING\n status, you must wait for that Fargate profile to finish deleting before\n you can create any other profiles in that cluster.
For more information, see Fargate profile in the\n Amazon EKS User Guide.
", + "smithy.api#documentation": "
Creates a Fargate profile for your Amazon EKS cluster. You\n must have at least one Fargate profile in a cluster to be able to run\n pods on Fargate.
The Fargate profile allows an administrator to declare which pods run\n on Fargate and specify which pods run on which Fargate\n profile. This declaration is done through the profile's selectors. Each profile can have\n up to five selectors that contain a namespace and labels. A namespace is required for\n every selector. The label field consists of multiple optional key-value pairs. Pods that\n match the selectors are scheduled on Fargate. If a to-be-scheduled pod\n matches any of the selectors in the Fargate profile, then that pod is run\n on Fargate.
When you create a Fargate profile, you must specify a pod execution\n role to use with the pods that are scheduled with the profile. This role is added to the\n cluster's Kubernetes Role Based\n Access Control (RBAC) for authorization so that the kubelet\n that is running on the Fargate infrastructure can register with your\n Amazon EKS cluster so that it can appear in your cluster as a node. The pod\n execution role also provides IAM permissions to the Fargate infrastructure to \n allow read access to Amazon ECR image repositories. For\n more information, see Pod Execution Role in the Amazon EKS User Guide.
Fargate profiles are immutable. However, you can create a new updated\n profile to replace an existing profile and then delete the original after the updated\n profile has finished creating.
If any Fargate profiles in a cluster are in the DELETING\n status, you must wait for that Fargate profile to finish deleting before\n you can create any other profiles in that cluster.
For more information, see Fargate profile in the\n Amazon EKS User Guide.
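A minimal Soto sketch of creating a Fargate profile with a single namespace selector, per the selector rules above; all identifiers are placeholders and the convenience signature is assumed:

```swift
import SotoEKS

// Sketch: only pods in the "default" namespace match and land on Fargate.
func createProfile(eks: EKS) async throws {
    let response = try await eks.createFargateProfile(
        clusterName: "my-cluster",            // placeholder
        fargateProfileName: "default-namespace",
        podExecutionRoleArn: "arn:aws:iam::123456789012:role/podExecutionRole", // placeholder
        selectors: [.init(namespace: "default")],
        subnets: ["subnet-6782e71e", "subnet-e7e761ac"] // placeholder
    )
    print(response.fargateProfile?.status ?? .creating)
}
```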
", "smithy.api#http": { "method": "POST", "uri": "/clusters/{clusterName}/fargate-profiles", @@ -3802,7 +3923,7 @@ "nodeRole": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "
The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The\n Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls\n through an IAM instance profile and associated policies. Before you can\n launch nodes and register them into a cluster, you must create an IAM\n role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the\n \n Amazon EKS User Guide\n . If you specify launchTemplate, then don't specify \n \n IamInstanceProfile\n in your launch template, or the node group \n deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.
", + "smithy.api#documentation": "
The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The\n Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services\n APIs on your behalf. Nodes receive permissions for these API calls\n through an IAM instance profile and associated policies. Before you can\n launch nodes and register them into a cluster, you must create an IAM\n role for those nodes to use when they are launched. For more information, see Amazon EKS\n node IAM role in the\n \n Amazon EKS User Guide\n . If you specify launchTemplate, then don't specify \n \n IamInstanceProfile\n in your launch template, or the node group \n deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.
", "smithy.api#required": {} } }, @@ -3864,7 +3985,7 @@ "releaseVersion": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "
The AMI version of the Amazon EKS optimized AMI to use with your node group.\n By default, the latest available AMI version for the node group's current Kubernetes version\n is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the\n Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the\n Amazon EKS User Guide.
If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify \n releaseVersion, or the node group deployment will fail.\n For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.
" + "smithy.api#documentation": "
The AMI version of the Amazon EKS optimized AMI to use with your node group.\n By default, the latest available AMI version for the node group's current Kubernetes version\n is used. For information about Linux versions, see Amazon EKS\n optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS\n managed node groups support the November 2022 and later releases of the\n Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the\n Amazon EKS User Guide.
If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify \n releaseVersion, or the node group deployment will fail.\n For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.
" } } }, @@ -3944,7 +4065,7 @@ "serviceAccount": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "
The name of the Kubernetes service account inside the cluster to associate the IAM credentials with.
", + "smithy.api#documentation": "
The name of the Kubernetes service account inside the cluster to associate the IAM\n credentials with.
", "smithy.api#required": {} } }, @@ -4103,7 +4224,7 @@ "target": "com.amazonaws.eks#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "
Specifying this option preserves the add-on software on your cluster but Amazon EKS stops managing any settings for the add-on. If an IAM\n account is associated with the add-on, it isn't removed.
", + "smithy.api#documentation": "
Specifying this option preserves the add-on software on your cluster but Amazon EKS\n stops managing any settings for the add-on. If an IAM\n account is associated with the add-on, it isn't removed.
", "smithy.api#httpQuery": "preserve" } } @@ -5148,6 +5269,126 @@ "smithy.api#output": {} } }, + "com.amazonaws.eks#DescribeClusterVersionMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.eks#DescribeClusterVersions": { + "type": "operation", + "input": { + "target": "com.amazonaws.eks#DescribeClusterVersionsRequest" + }, + "output": { + "target": "com.amazonaws.eks#DescribeClusterVersionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.eks#InvalidParameterException" + }, + { + "target": "com.amazonaws.eks#InvalidRequestException" + }, + { + "target": "com.amazonaws.eks#ServerException" + } + ], + "traits": { + "smithy.api#documentation": "
Lists available Kubernetes versions for Amazon EKS clusters.
", + "smithy.api#http": { + "method": "GET", + "uri": "/cluster-versions", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "clusterVersions", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.eks#DescribeClusterVersionsRequest": { + "type": "structure", + "members": { + "clusterType": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "
The type of cluster to filter versions by.
", + "smithy.api#httpQuery": "clusterType" + } + }, + "maxResults": { + "target": "com.amazonaws.eks#DescribeClusterVersionMaxResults", + "traits": { + "smithy.api#documentation": "
Maximum number of results to return.
", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "
Pagination token for the next set of results.
", + "smithy.api#httpQuery": "nextToken" + } + }, + "defaultOnly": { + "target": "com.amazonaws.eks#BoxedBoolean", + "traits": { + "smithy.api#documentation": "
Filter to show only default versions.
", + "smithy.api#httpQuery": "defaultOnly" + } + }, + "includeAll": { + "target": "com.amazonaws.eks#BoxedBoolean", + "traits": { + "smithy.api#documentation": "
Include all available versions in the response.
", + "smithy.api#httpQuery": "includeAll" + } + }, + "clusterVersions": { + "target": "com.amazonaws.eks#StringList", + "traits": { + "smithy.api#documentation": "
List of specific cluster versions to describe.
", + "smithy.api#httpQuery": "clusterVersions" + } + }, + "status": { + "target": "com.amazonaws.eks#ClusterVersionStatus", + "traits": { + "smithy.api#documentation": "
Filter versions by their current status.
", + "smithy.api#httpQuery": "status" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.eks#DescribeClusterVersionsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

Pagination token for the next set of results.

" + } + }, + "clusterVersions": { + "target": "com.amazonaws.eks#ClusterVersionList", + "traits": { + "smithy.api#documentation": "

List of cluster version information objects.
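Because the operation is marked smithy.api#paginated, Soto should emit both a one-shot method and a paginator for it. A hedged sketch of the one-shot call, reusing the `eks` service object from the earlier sketch and assuming the usual flattened signature:

```swift
// Sketch: list only the default Kubernetes versions, 50 per page.
let page = try await eks.describeClusterVersions(
    defaultOnly: true,
    maxResults: 50
)
for versionInfo in page.clusterVersions ?? [] {
    print(versionInfo)
}
// A describeClusterVersionsPaginator variant would normally be generated
// as well, given the smithy.api#paginated trait above.
```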

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.eks#DescribeEksAnywhereSubscription": { "type": "operation", "input": { @@ -6083,7 +6324,7 @@ } }, "traits": { - "smithy.api#documentation": "

Indicates the current configuration of the load balancing capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. For more information, see EKS Auto Mode load balancing capability in the EKS User Guide.

" + "smithy.api#documentation": "

Indicates the current configuration of the load balancing capability on your EKS Auto Mode cluster. For example, if the capability is enabled or disabled. For more information, see EKS Auto Mode load balancing capability in the Amazon EKS User Guide.

" } }, "com.amazonaws.eks#EncryptionConfig": { @@ -6629,6 +6870,12 @@ "traits": { "smithy.api#documentation": "

The summary information about deprecated resource usage for an insight check in the\n UPGRADE_READINESS category.

" } + }, + "addonCompatibilityDetails": { + "target": "com.amazonaws.eks#AddonCompatibilityDetails", + "traits": { + "smithy.api#documentation": "

A list of AddonCompatibilityDetail objects for Amazon EKS add-ons.

" + } } }, "traits": { @@ -6925,7 +7172,7 @@ "code": { "target": "com.amazonaws.eks#NodegroupIssueCode", "traits": { - "smithy.api#documentation": "

A brief description of the error.

  • AccessDenied: Amazon EKS or one or more of your managed nodes is failing to authenticate or authorize with your Kubernetes cluster API server.
  • AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch instances.
  • AutoScalingGroupNotFound: We couldn't find the Auto Scaling group associated with the managed node group. You may be able to recreate an Auto Scaling group with the same settings to recover.
  • ClusterUnreachable: Amazon EKS or one or more of your managed nodes is unable to communicate with your Kubernetes cluster API server. This can happen if there are network disruptions or if API servers are timing out processing requests.
  • Ec2InstanceTypeDoesNotExist: One or more of the supplied Amazon EC2 instance types do not exist. Amazon EKS checked for the instance types that you provided in this Amazon Web Services Region, and one or more aren't available.
  • Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch template for your managed node group. You may be able to recreate a launch template with the same settings to recover.
  • Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not match the version that Amazon EKS created. You may be able to revert to the version that Amazon EKS created to recover.
  • Ec2SecurityGroupDeletionFailure: We could not delete the remote access security group for your managed node group. Remove any dependencies from the security group.
  • Ec2SecurityGroupNotFound: We couldn't find the cluster security group for the cluster. You must recreate your cluster.
  • Ec2SubnetInvalidConfiguration: One or more Amazon EC2 subnets specified for a node group do not automatically assign public IP addresses to instances launched into them. If you want your instances to be assigned a public IP address, then you need to enable the auto-assign public IP address setting for the subnet. See Modifying the public IPv4 addressing attribute for your subnet in the Amazon VPC User Guide.
  • IamInstanceProfileNotFound: We couldn't find the IAM instance profile for your managed node group. You may be able to recreate an instance profile with the same settings to recover.
  • IamNodeRoleNotFound: We couldn't find the IAM role for your managed node group. You may be able to recreate an IAM role with the same settings to recover.
  • InstanceLimitExceeded: Your Amazon Web Services account is unable to launch any more instances of the specified instance type. You may be able to request an Amazon EC2 instance limit increase to recover.
  • InsufficientFreeAddresses: One or more of the subnets associated with your managed node group does not have enough available IP addresses for new nodes.
  • InternalFailure: These errors are usually caused by an Amazon EKS server-side issue.
  • NodeCreationFailure: Your launched instances are unable to register with your Amazon EKS cluster. Common causes of this failure are insufficient node IAM role permissions or lack of outbound internet access for the nodes.
" + "smithy.api#documentation": "

A brief description of the error.

  • AccessDenied: Amazon EKS or one or more of your managed nodes is failing to authenticate or authorize with your Kubernetes cluster API server.
  • AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch instances.
  • AutoScalingGroupNotFound: We couldn't find the Auto Scaling group associated with the managed node group. You may be able to recreate an Auto Scaling group with the same settings to recover.
  • ClusterUnreachable: Amazon EKS or one or more of your managed nodes is unable to communicate with your Kubernetes cluster API server. This can happen if there are network disruptions or if API servers are timing out processing requests.
  • Ec2InstanceTypeDoesNotExist: One or more of the supplied Amazon EC2 instance types do not exist. Amazon EKS checked for the instance types that you provided in this Amazon Web Services Region, and one or more aren't available.
  • Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch template for your managed node group. You may be able to recreate a launch template with the same settings to recover.
  • Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not match the version that Amazon EKS created. You may be able to revert to the version that Amazon EKS created to recover.
  • Ec2SecurityGroupDeletionFailure: We could not delete the remote access security group for your managed node group. Remove any dependencies from the security group.
  • Ec2SecurityGroupNotFound: We couldn't find the cluster security group for the cluster. You must recreate your cluster.
  • Ec2SubnetInvalidConfiguration: One or more Amazon EC2 subnets specified for a node group do not automatically assign public IP addresses to instances launched into them. If you want your instances to be assigned a public IP address, then you need to enable the auto-assign public IP address setting for the subnet. See Modifying the public IPv4 addressing attribute for your subnet in the Amazon VPC User Guide.
  • IamInstanceProfileNotFound: We couldn't find the IAM instance profile for your managed node group. You may be able to recreate an instance profile with the same settings to recover.
  • IamNodeRoleNotFound: We couldn't find the IAM role for your managed node group. You may be able to recreate an IAM role with the same settings to recover.
  • InstanceLimitExceeded: Your Amazon Web Services account is unable to launch any more instances of the specified instance type. You may be able to request an Amazon EC2 instance limit increase to recover.
  • InsufficientFreeAddresses: One or more of the subnets associated with your managed node group does not have enough available IP addresses for new nodes.
  • InternalFailure: These errors are usually caused by an Amazon EKS server-side issue.
  • NodeCreationFailure: Your launched instances are unable to register with your Amazon EKS cluster. Common causes of this failure are insufficient node IAM role permissions or lack of outbound internet access for the nodes.
" } }, "message": { @@ -6963,13 +7210,13 @@ "ipFamily": { "target": "com.amazonaws.eks#IpFamily", "traits": { - "smithy.api#documentation": "

Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you\n don't specify a value, ipv4 is used by default. You can only specify an IP\n family when you create a cluster and can't change this value once the cluster is\n created. If you specify ipv6, the VPC and subnets that you specify for\n cluster creation must have both IPv4 and IPv6 CIDR blocks\n assigned to them. You can't specify ipv6 for clusters in China\n Regions.

\n

You can only specify ipv6 for 1.21 and later clusters that\n use version 1.10.1 or later of the Amazon VPC CNI add-on. If you specify\n ipv6, then ensure that your VPC meets the requirements in the\n considerations listed in Assigning IPv6 addresses to pods and\n services in the Amazon EKS User Guide. Kubernetes assigns services\n IPv6 addresses from the unique local address range\n (fc00::/7). You can't specify a custom IPv6 CIDR block.\n Pod addresses are assigned from the subnet's IPv6 CIDR.

" + "smithy.api#documentation": "

Specify which IP family is used to assign Kubernetes pod and service IP addresses. If you\n don't specify a value, ipv4 is used by default. You can only specify an IP\n family when you create a cluster and can't change this value once the cluster is\n created. If you specify ipv6, the VPC and subnets that you specify for\n cluster creation must have both IPv4 and IPv6 CIDR blocks\n assigned to them. You can't specify ipv6 for clusters in China\n Regions.

\n

You can only specify ipv6 for 1.21 and later clusters that\n use version 1.10.1 or later of the Amazon VPC CNI add-on. If you specify\n ipv6, then ensure that your VPC meets the requirements in the\n considerations listed in Assigning IPv6 addresses to pods and\n services in the Amazon EKS User Guide. Kubernetes assigns services\n IPv6 addresses from the unique local address range\n (fc00::/7). You can't specify a custom IPv6 CIDR block.\n Pod addresses are assigned from the subnet's IPv6 CIDR.

" } }, "elasticLoadBalancing": { "target": "com.amazonaws.eks#ElasticLoadBalancing", "traits": { - "smithy.api#documentation": "

Request to enable or disable the load balancing capability on your EKS Auto Mode cluster. For more information, see EKS Auto Mode load balancing capability in the EKS User Guide.

" + "smithy.api#documentation": "

Request to enable or disable the load balancing capability on your EKS Auto Mode cluster. For more information, see EKS Auto Mode load balancing capability in the Amazon EKS User Guide.

" } } }, @@ -7522,7 +7769,7 @@ "clusters": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "

A list of all of the clusters for your account in the specified Amazon Web Services Region.

" + "smithy.api#documentation": "

A list of all of the clusters for your account in the specified Amazon Web Services Region\n .

" } }, "nextToken": { @@ -7943,7 +8190,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the managed node groups associated with the specified cluster in your Amazon Web Services account in the specified Amazon Web Services Region. Self-managed node\n groups aren't listed.

", + "smithy.api#documentation": "

Lists the managed node groups associated with the specified cluster in your Amazon Web Services\n account in the specified Amazon Web Services Region. Self-managed node\n groups aren't listed.

", "smithy.api#http": { "method": "GET", "uri": "/clusters/{clusterName}/node-groups", @@ -8218,7 +8465,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the updates associated with an Amazon EKS resource in your Amazon Web Services account, in the specified Amazon Web Services Region.

", + "smithy.api#documentation": "

Lists the updates associated with an Amazon EKS resource in your Amazon Web Services\n account, in the specified Amazon Web Services Region.

", "smithy.api#http": { "method": "GET", "uri": "/clusters/{name}/updates", @@ -8317,7 +8564,7 @@ "enabled": { "target": "com.amazonaws.eks#BoxedBoolean", "traits": { - "smithy.api#documentation": "

If a log type is enabled, that log type exports its control plane logs to CloudWatch Logs. If a log type isn't enabled, that log type doesn't export its control\n plane logs. Each individual log type can be enabled or disabled independently.

" + "smithy.api#documentation": "

If a log type is enabled, that log type exports its control plane logs to CloudWatch Logs\n . If a log type isn't enabled, that log type doesn't export its control\n plane logs. Each individual log type can be enabled or disabled independently.

" } } }, @@ -8839,13 +9086,13 @@ "maxSize": { "target": "com.amazonaws.eks#Capacity", "traits": { - "smithy.api#documentation": "

The maximum number of nodes that the managed node group can scale out to. For\n information about the maximum number that you can specify, see Amazon EKS service quotas in the Amazon EKS User Guide.

" + "smithy.api#documentation": "

The maximum number of nodes that the managed node group can scale out to. For\n information about the maximum number that you can specify, see Amazon EKS service \n quotas in the Amazon EKS User Guide.

" } }, "desiredSize": { "target": "com.amazonaws.eks#ZeroCapacity", "traits": { - "smithy.api#documentation": "

The current number of nodes that the managed node group should maintain.

\n \n

If you use the Kubernetes Cluster\n Autoscaler, you shouldn't change the desiredSize value\n directly, as this can cause the Cluster Autoscaler to suddenly scale up or scale\n down.

\n
\n

Whenever this parameter changes, the number of worker nodes in the node group is\n updated to the specified size. If this parameter is given a value that is smaller than\n the current number of running worker nodes, the necessary number of worker nodes are\n terminated to match the given value.\n \n When using CloudFormation, no action occurs if you remove this parameter from your CFN\n template.

\n

This parameter can be different from minSize in some cases, such as when\n starting with extra hosts for testing. This parameter can also be different when you\n want to start with an estimated number of needed hosts, but let the Cluster Autoscaler\n reduce the number if there are too many. When the Cluster Autoscaler is used, the\n desiredSize parameter is altered by the Cluster Autoscaler (but can be\n out-of-date for short periods of time). The Cluster Autoscaler doesn't scale a managed\n node group lower than minSize or higher than maxSize.

" + "smithy.api#documentation": "

The current number of nodes that the managed node group should maintain.

\n \n

If you use the Kubernetes Cluster\n Autoscaler, you shouldn't change the desiredSize value\n directly, as this can cause the Cluster Autoscaler to suddenly scale up or scale\n down.

\n
\n

Whenever this parameter changes, the number of worker nodes in the node group is\n updated to the specified size. If this parameter is given a value that is smaller than\n the current number of running worker nodes, the necessary number of worker nodes are\n terminated to match the given value.\n \n When using CloudFormation, no action occurs if you remove this parameter from your CFN\n template.

\n

This parameter can be different from minSize in some cases, such as when\n starting with extra hosts for testing. This parameter can also be different when you\n want to start with an estimated number of needed hosts, but let the Cluster Autoscaler\n reduce the number if there are too many. When the Cluster Autoscaler is used, the \n desiredSize parameter is altered by the Cluster Autoscaler (but can be\n out-of-date for short periods of time). The Cluster Autoscaler doesn't scale a managed\n node group lower than minSize or higher than maxSize.

" } } }, @@ -8914,10 +9161,33 @@ "traits": { "smithy.api#documentation": "

The maximum percentage of nodes unavailable during a version update. This percentage\n of nodes are updated in parallel, up to 100 nodes at once. This value or\n maxUnavailable is required to have a value.

" } + }, + "updateStrategy": { + "target": "com.amazonaws.eks#NodegroupUpdateStrategies", + "traits": { + "smithy.api#documentation": "

The configuration for the behavior to follow during a node group version update of this managed\n node group. You choose between two possible strategies for replacing nodes during an\n UpdateNodegroupVersion action.

\n

An Amazon EKS managed node group updates by replacing nodes with new nodes of newer AMI\n versions in parallel. The update strategy changes how the managed node group\n replaces nodes during the update. The\n default strategy has guardrails to protect you from\n misconfiguration and launches the new instances first, before terminating the old\n instances. The minimal strategy removes the guardrails and\n terminates the old instances before launching the new instances. This minimal\n strategy is useful in scenarios where you are constrained to resources or costs (for\n example, with hardware accelerators such as GPUs).

" + } } }, "traits": { - "smithy.api#documentation": "

The node group update configuration.

" + "smithy.api#documentation": "

The node group update configuration. An Amazon EKS managed node group updates by replacing nodes with new\n nodes of newer AMI versions in parallel. You choose the maximum\n unavailable and the update strategy.
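A sketch of opting a node group into the new MINIMAL strategy from Swift; it assumes the generated NodegroupUpdateConfig initializer gains an updateStrategy parameter matching this model, and that the flattened updateNodegroupConfig method accepts the config. Cluster and node group names are placeholders:

```swift
// Sketch: replace up to 25% of nodes at a time, terminating old nodes
// before launching replacements (MINIMAL removes the default guardrails).
let updateConfig = EKS.NodegroupUpdateConfig(
    maxUnavailablePercentage: 25,
    updateStrategy: .minimal
)
_ = try await eks.updateNodegroupConfig(
    clusterName: "my-cluster",    // placeholder
    nodegroupName: "gpu-nodes",   // placeholder
    updateConfig: updateConfig
)
```

MINIMAL trades away the launch-before-terminate safety for lower peak capacity, which is the stated use case for scarce or expensive instance types such as GPU nodes.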

" + } + }, + "com.amazonaws.eks#NodegroupUpdateStrategies": { + "type": "enum", + "members": { + "DEFAULT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEFAULT" + } + }, + "MINIMAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MINIMAL" + } + } } }, "com.amazonaws.eks#NonZeroInteger": { @@ -9110,7 +9380,7 @@ "controlPlaneInstanceType": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The Amazon EC2 instance type that you want to use for your local Amazon EKS cluster on Outposts. Choose an instance type based on the number of nodes\n that your cluster will have. For more information, see Capacity\n considerations in the Amazon EKS User Guide.

\n

The instance type that you specify is used for all Kubernetes control plane instances. The\n instance type can't be changed after cluster creation. The control plane is not\n automatically scaled by Amazon EKS.

\n

", + "smithy.api#documentation": "

The Amazon EC2 instance type that you want to use for your local Amazon EKS\n cluster on Outposts. Choose an instance type based on the number of nodes\n that your cluster will have. For more information, see Capacity\n considerations in the Amazon EKS User Guide.

\n

The instance type that you specify is used for all Kubernetes control plane instances. The\n instance type can't be changed after cluster creation. The control plane is not\n automatically scaled by Amazon EKS.

\n

", "smithy.api#required": {} } }, @@ -9180,7 +9450,7 @@ "serviceAccount": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The name of the Kubernetes service account inside the cluster to associate the IAM credentials with.

" + "smithy.api#documentation": "

The name of the Kubernetes service account inside the cluster to associate the IAM\n credentials with.

" } }, "roleArn": { @@ -9350,7 +9620,7 @@ "connectorConfig": { "target": "com.amazonaws.eks#ConnectorConfigRequest", "traits": { - "smithy.api#documentation": "

The configuration settings required to connect the Kubernetes cluster to the Amazon EKS control plane.

", + "smithy.api#documentation": "

The configuration settings required to connect the Kubernetes cluster to the Amazon EKS\n control plane.

", "smithy.api#required": {} } }, @@ -9389,7 +9659,7 @@ "ec2SshKey": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The Amazon EC2 SSH key name that provides access for SSH communication with\n the nodes in the managed node group. For more information, see Amazon EC2 key pairs and Linux instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances. For\n Windows, an Amazon EC2 SSH key is used to obtain the RDP password. For more\n information, see Amazon EC2 key pairs and Windows instances in\n the Amazon Elastic Compute Cloud User Guide for Windows Instances.

" + "smithy.api#documentation": "

The Amazon EC2 SSH key name that provides access for SSH communication with\n the nodes in the managed node group. For more information, see Amazon EC2\n key pairs and Linux instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances. For\n Windows, an Amazon EC2 SSH key is used to obtain the RDP password. For more\n information, see Amazon EC2 key pairs and Windows instances in\n the Amazon Elastic Compute Cloud User Guide for Windows Instances.

" } }, "sourceSecurityGroups": { @@ -9627,7 +9897,7 @@ } }, "traits": { - "smithy.api#documentation": "

The specified resource could not be found. You can view your available clusters with\n ListClusters. You can view your available managed node groups with\n ListNodegroups. Amazon EKS clusters and node groups are Amazon Web Services Region specific.

", + "smithy.api#documentation": "

The specified resource could not be found. You can view your available clusters with\n ListClusters. You can view your available managed node groups with\n ListNodegroups. Amazon EKS clusters and node groups are Amazon Web Services Region\n specific.

", "smithy.api#error": "client", "smithy.api#httpError": 404 } @@ -9724,7 +9994,7 @@ } }, "traits": { - "smithy.api#documentation": "

Request to update the configuration of the storage capability of your EKS Auto Mode cluster. For example, enable the capability. For more information, see EKS Auto Mode block storage capability in the EKS User Guide.

" + "smithy.api#documentation": "

Request to update the configuration of the storage capability of your EKS Auto Mode cluster. For example, enable the capability. For more information, see EKS Auto Mode block storage capability in the Amazon EKS User Guide.

" } }, "com.amazonaws.eks#StorageConfigResponse": { @@ -10248,7 +10518,7 @@ "podIdentityAssociations": { "target": "com.amazonaws.eks#AddonPodIdentityAssociationsList", "traits": { - "smithy.api#documentation": "

An array of Pod Identity Associations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change is made. If an empty array is provided, existing Pod Identity Associations owned by the Addon are deleted.

\n

For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.

" + "smithy.api#documentation": "

An array of Pod Identity Associations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change is made. If an empty array is provided, existing Pod Identity Associations owned by the Addon are deleted.

\n

For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.

" } } }, @@ -10296,7 +10566,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates an Amazon EKS cluster configuration. Your cluster continues to\n function during the update. The response output includes an update ID that you can use\n to track the status of your cluster update with DescribeUpdate.

\n

You can use this API operation to enable or disable exporting the Kubernetes control plane\n logs for your cluster to CloudWatch Logs. By default, cluster control plane logs\n aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the\n \n Amazon EKS User Guide\n .

\n \n

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.

\n
\n

You can also use this API operation to enable or disable public and private access to\n your cluster's Kubernetes API server endpoint. By default, public access is enabled, and\n private access is disabled. For more information, see Amazon EKS cluster endpoint access control in the\n \n Amazon EKS User Guide\n .

\n

You can also use this API operation to choose different subnets and security groups\n for the cluster. You must specify at least two subnets that are in different\n Availability Zones. You can't change which VPC the subnets are from, the subnets must be\n in the same VPC as the subnets that the cluster was created with. For more information\n about the VPC requirements, see https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html in the \n Amazon EKS User Guide\n .

\n

You can also use this API operation to enable or disable ARC zonal shift. If zonal shift is enabled, Amazon Web Services\n configures zonal autoshift for the cluster.

\n

Cluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING (this status transition is\n eventually consistent). When the update is complete (either Failed or\n Successful), the cluster status moves to Active.

", + "smithy.api#documentation": "

Updates an Amazon EKS cluster configuration. Your cluster continues to\n function during the update. The response output includes an update ID that you can use\n to track the status of your cluster update with DescribeUpdate.

\n

You can use this API operation to enable or disable exporting the Kubernetes control plane\n logs for your cluster to CloudWatch Logs. By default, cluster control plane logs\n aren't exported to CloudWatch Logs. For more information, see Amazon EKS\n Cluster control plane logs in the\n \n Amazon EKS User Guide\n .

\n \n

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.

\n
\n

You can also use this API operation to enable or disable public and private access to\n your cluster's Kubernetes API server endpoint. By default, public access is enabled, and\n private access is disabled. For more information, see Amazon EKS\n cluster endpoint access control in the\n \n Amazon EKS User Guide\n .

\n

You can also use this API operation to choose different subnets and security groups\n for the cluster. You must specify at least two subnets that are in different\n Availability Zones. You can't change which VPC the subnets are from, the subnets must be\n in the same VPC as the subnets that the cluster was created with. For more information\n about the VPC requirements, see https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html in the \n Amazon EKS User Guide\n .

\n

You can also use this API operation to enable or disable ARC zonal shift. If zonal shift is enabled, Amazon Web Services\n configures zonal autoshift for the cluster.

\n

Cluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING (this status transition is\n eventually consistent). When the update is complete (either Failed or\n Successful), the cluster status moves to Active.
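Putting a couple of those knobs together, a hedged Swift sketch of a single UpdateClusterConfig call that turns on two control plane log types and restricts public endpoint access; the shape and parameter names follow this model's definitions, while the cluster name and CIDR are placeholders:

```swift
// Sketch: export API and audit logs to CloudWatch Logs, keep both endpoint
// modes on, and restrict public access to one CIDR block.
let result = try await eks.updateClusterConfig(
    logging: EKS.Logging(clusterLogging: [
        EKS.LogSetup(enabled: true, types: [.api, .audit])
    ]),
    name: "my-cluster",  // placeholder
    resourcesVpcConfig: EKS.VpcConfigRequest(
        endpointPrivateAccess: true,
        endpointPublicAccess: true,
        publicAccessCidrs: ["203.0.113.0/24"]  // placeholder CIDR
    )
)
print(result.update?.id ?? "no update started")
```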

", "smithy.api#http": { "method": "POST", "uri": "/clusters/{name}/update-config", @@ -10321,7 +10591,7 @@ "logging": { "target": "com.amazonaws.eks#Logging", "traits": { - "smithy.api#documentation": "

Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS cluster control plane logs in the\n \n Amazon EKS User Guide\n .

\n \n

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.

\n
" + "smithy.api#documentation": "

Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs\n . By default, cluster control plane logs aren't exported to CloudWatch Logs\n . For more information, see Amazon EKS\n cluster control plane logs in the\n \n Amazon EKS User Guide\n .

\n \n

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.

\n
" } }, "clientRequestToken": { @@ -10409,7 +10679,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster\n continues to function during the update. The response output includes an update ID that\n you can use to track the status of your cluster update with the DescribeUpdate API operation.

\n

Cluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING (this status transition is\n eventually consistent). When the update is complete (either Failed or\n Successful), the cluster status moves to Active.

\n

If your cluster has managed node groups attached to it, all of your node groups’ Kubernetes\n versions must match the cluster’s Kubernetes version in order to update the cluster to a new\n Kubernetes version.

", + "smithy.api#documentation": "

Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster\n continues to function during the update. The response output includes an update ID that\n you can use to track the status of your cluster update with the DescribeUpdate API operation.

\n

Cluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING (this status transition is\n eventually consistent). When the update is complete (either Failed or\n Successful), the cluster status moves to Active.

\n

If your cluster has managed node groups attached to it, all of your node groups' Kubernetes\n versions must match the cluster's Kubernetes version in order to update the cluster to a new\n Kubernetes version.
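A brief sketch of starting an upgrade and then polling it with DescribeUpdate, assuming the standard generated Soto methods; the cluster name and target version are placeholders:

```swift
// Sketch: request the upgrade, then check its status by update ID.
let started = try await eks.updateClusterVersion(
    name: "my-cluster",  // placeholder
    version: "1.31"      // placeholder target Kubernetes version
)
if let updateId = started.update?.id {
    let polled = try await eks.describeUpdate(name: "my-cluster", updateId: updateId)
    print(polled.update?.status ?? .inProgress)
}
```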

", "smithy.api#http": { "method": "POST", "uri": "/clusters/{name}/updates", @@ -10589,7 +10859,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates an Amazon EKS managed node group configuration. Your node group\n continues to function during the update. The response output includes an update ID that\n you can use to track the status of your node group update with the DescribeUpdate API operation. Currently you can update the Kubernetes labels\n for a node group or the scaling configuration.

", + "smithy.api#documentation": "

Updates an Amazon EKS managed node group configuration. Your node group\n continues to function during the update. The response output includes an update ID that\n you can use to track the status of your node group update with the DescribeUpdate API operation. You can update the Kubernetes labels and taints\n for a node group and the scaling and version update configuration.

", "smithy.api#http": { "method": "POST", "uri": "/clusters/{clusterName}/node-groups/{nodegroupName}/update-config", @@ -10734,7 +11004,7 @@ "releaseVersion": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The AMI version of the Amazon EKS optimized AMI to use for the update. By\n default, the latest available AMI version for the node group's Kubernetes version is used.\n For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the\n Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the\n Amazon EKS User Guide.

\n

If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify \n releaseVersion, or the node group update will fail.\n For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" + "smithy.api#documentation": "

The AMI version of the Amazon EKS optimized AMI to use for the update. By\n default, the latest available AMI version for the node group's Kubernetes version is used.\n For information about Linux versions, see Amazon EKS\n optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS\n managed node groups support the November 2022 and later releases of the\n Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the\n Amazon EKS User Guide.

\n

If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify \n releaseVersion, or the node group update will fail.\n For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.
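For illustration, a sketch pinning a node group update to an explicit AMI release; the release label shown is hypothetical, and per the caution above you would omit releaseVersion entirely when the launch template uses a custom AMI:

```swift
// Sketch: update the node group to a specific EKS-optimized AMI release.
_ = try await eks.updateNodegroupVersion(
    clusterName: "my-cluster",         // placeholder
    nodegroupName: "gpu-nodes",        // placeholder
    releaseVersion: "1.31.3-20250115"  // hypothetical release label
)
```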

" } }, "launchTemplate": { @@ -10940,6 +11210,12 @@ "smithy.api#enumValue": "NodeRepairEnabled" } }, + "UPDATE_STRATEGY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UpdateStrategy" + } + }, "CONFIGURATION_VALUES": { "target": "smithy.api#Unit", "traits": { @@ -11228,12 +11504,12 @@ "supportType": { "target": "com.amazonaws.eks#SupportType", "traits": { - "smithy.api#documentation": "

If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support.

\n

\n Learn more about EKS Extended Support in the EKS User Guide.\n

" + "smithy.api#documentation": "

If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support.

\n

\n Learn more about EKS Extended Support in the Amazon EKS User Guide.\n

" } } }, "traits": { - "smithy.api#documentation": "

The support policy to use for the cluster. Extended support allows you to remain on specific Kubernetes versions for longer. Clusters in extended support have higher costs. The default value is EXTENDED. Use STANDARD to disable extended support.

\n

\n Learn more about EKS Extended Support in the EKS User Guide.\n

" + "smithy.api#documentation": "

The support policy to use for the cluster. Extended support allows you to remain on specific Kubernetes versions for longer. Clusters in extended support have higher costs. The default value is EXTENDED. Use STANDARD to disable extended support.

\n

\n Learn more about EKS Extended Support in the Amazon EKS User Guide.\n
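A one-line sketch of opting out of extended support, assuming updateClusterConfig accepts the upgradePolicy shape as this model suggests; the cluster name is a placeholder:

```swift
// Sketch: STANDARD support means the cluster is auto-upgraded at the end
// of standard support instead of entering (separately billed) extended support.
_ = try await eks.updateClusterConfig(
    name: "my-cluster",  // placeholder
    upgradePolicy: EKS.UpgradePolicyRequest(supportType: .standard)
)
```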

" } }, "com.amazonaws.eks#UpgradePolicyResponse": { @@ -11242,12 +11518,12 @@ "supportType": { "target": "com.amazonaws.eks#SupportType", "traits": { - "smithy.api#documentation": "

If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support.

\n

\n Learn more about EKS Extended Support in the EKS User Guide.\n

" + "smithy.api#documentation": "

If the cluster is set to EXTENDED, it will enter extended support at the end of standard support. If the cluster is set to STANDARD, it will be automatically upgraded at the end of standard support.

\n

\n Learn more about EKS Extended Support in the Amazon EKS User Guide.\n

" } } }, "traits": { - "smithy.api#documentation": "

This value indicates if extended support is enabled or disabled for the cluster.

\n

\n Learn more about EKS Extended Support in the EKS User Guide.\n

" + "smithy.api#documentation": "

This value indicates if extended support is enabled or disabled for the cluster.

\n

\n Learn more about EKS Extended Support in the Amazon EKS User Guide.\n

" } }, "com.amazonaws.eks#VpcConfigRequest": { @@ -11280,7 +11556,7 @@ "publicAccessCidrs": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "

The CIDR blocks that are allowed access to your cluster's public Kubernetes API server\n endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that\n you specify is denied. The default value is 0.0.0.0/0. If you've disabled\n private endpoint access, make sure that you specify the necessary CIDR blocks for every\n node and Fargate\n Pod in the cluster. For more information, see Amazon EKS cluster endpoint access control in the\n \n Amazon EKS User Guide\n .

" + "smithy.api#documentation": "

The CIDR blocks that are allowed access to your cluster's public Kubernetes API server\n endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that\n you specify is denied. The default value is 0.0.0.0/0. If you've disabled\n private endpoint access, make sure that you specify the necessary CIDR blocks for every\n node and Fargate\n Pod in the cluster. For more information, see Amazon EKS cluster \n endpoint access control in the \n Amazon EKS User Guide\n .

" } } }, @@ -11326,7 +11602,7 @@ "target": "com.amazonaws.eks#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

This parameter indicates whether the Amazon EKS private API server endpoint is\n enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API\n requests that originate from within your cluster's VPC use the private VPC endpoint\n instead of traversing the internet. If this value is disabled and you have nodes or\n Fargate pods in the cluster, then ensure that\n publicAccessCidrs includes the necessary CIDR blocks for communication\n with the nodes or Fargate pods. For more information, see Amazon EKS cluster endpoint access control in the\n \n Amazon EKS User Guide\n .

" + "smithy.api#documentation": "

This parameter indicates whether the Amazon EKS private API server endpoint is\n enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API\n requests that originate from within your cluster's VPC use the private VPC endpoint\n instead of traversing the internet. If this value is disabled and you have nodes or\n Fargate pods in the cluster, then ensure that\n publicAccessCidrs includes the necessary CIDR blocks for communication\n with the nodes or Fargate pods. For more information, see Amazon EKS\n cluster endpoint access control in the\n \n Amazon EKS User Guide\n .

" } }, "publicAccessCidrs": { diff --git a/models/emr-serverless.json b/models/emr-serverless.json index 14a6a9b411..45371c628f 100644 --- a/models/emr-serverless.json +++ b/models/emr-serverless.json @@ -1651,7 +1651,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 256 + "max": 4096 }, "smithy.api#pattern": ".*\\S.*", "smithy.api#sensitive": {} diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 1c7d34cccf..ff6c6a3856 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -59,6 +59,9 @@ "ap-southeast-5" : { "description" : "Asia Pacific (Malaysia)" }, + "ap-southeast-7" : { + "description" : "Asia Pacific (Thailand)" + }, "ca-central-1" : { "description" : "Canada (Central)" }, @@ -98,6 +101,9 @@ "me-south-1" : { "description" : "Middle East (Bahrain)" }, + "mx-central-1" : { + "description" : "Mexico (Central)" + }, "sa-east-1" : { "description" : "South America (Sao Paulo)" }, @@ -129,6 +135,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "access-analyzer-fips.ca-central-1.amazonaws.com", @@ -194,6 +201,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -247,6 +255,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "acm-fips.ca-central-1.amazonaws.com", @@ -284,6 +293,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -558,6 +568,7 @@ }, "aoss" : { "endpoints" : { + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -566,6 +577,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -675,85 +687,151 @@ "credentialScope" : { "region" : "af-south-1" }, - "hostname" : "api.ecr.af-south-1.amazonaws.com" + "hostname" : "api.ecr.af-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-east-1" : { "credentialScope" : { "region" : "ap-east-1" }, - "hostname" : "api.ecr.ap-east-1.amazonaws.com" + "hostname" : "api.ecr.ap-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-1" : { "credentialScope" : { "region" : "ap-northeast-1" }, - "hostname" : "api.ecr.ap-northeast-1.amazonaws.com" + "hostname" : "api.ecr.ap-northeast-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-2" : { "credentialScope" : { "region" : "ap-northeast-2" }, - "hostname" : "api.ecr.ap-northeast-2.amazonaws.com" + "hostname" : "api.ecr.ap-northeast-2.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-3" : { "credentialScope" : { "region" : "ap-northeast-3" }, - "hostname" : "api.ecr.ap-northeast-3.amazonaws.com" + "hostname" : "api.ecr.ap-northeast-3.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-south-1" : { "credentialScope" : { "region" : "ap-south-1" }, - "hostname" : "api.ecr.ap-south-1.amazonaws.com" + "hostname" : 
"api.ecr.ap-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-south-2" : { "credentialScope" : { "region" : "ap-south-2" }, - "hostname" : "api.ecr.ap-south-2.amazonaws.com" + "hostname" : "api.ecr.ap-south-2.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-1" : { "credentialScope" : { "region" : "ap-southeast-1" }, - "hostname" : "api.ecr.ap-southeast-1.amazonaws.com" + "hostname" : "api.ecr.ap-southeast-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-2" : { "credentialScope" : { "region" : "ap-southeast-2" }, - "hostname" : "api.ecr.ap-southeast-2.amazonaws.com" + "hostname" : "api.ecr.ap-southeast-2.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-3" : { "credentialScope" : { "region" : "ap-southeast-3" }, - "hostname" : "api.ecr.ap-southeast-3.amazonaws.com" + "hostname" : "api.ecr.ap-southeast-3.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-4" : { "credentialScope" : { "region" : "ap-southeast-4" }, - "hostname" : "api.ecr.ap-southeast-4.amazonaws.com" + "hostname" : "api.ecr.ap-southeast-4.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-5" : { "credentialScope" : { "region" : "ap-southeast-5" }, - "hostname" : "api.ecr.ap-southeast-5.amazonaws.com" + "hostname" : "api.ecr.ap-southeast-5.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-7" : { + "credentialScope" : { + "region" : "ap-southeast-7" + }, + "hostname" : "api.ecr.ap-southeast-7.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" }, - "hostname" : "api.ecr.ca-central-1.amazonaws.com" + "hostname" : "api.ecr.ca-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ca-west-1" : { "credentialScope" : { "region" : "ca-west-1" }, - "hostname" : "api.ecr.ca-west-1.amazonaws.com" + "hostname" : "api.ecr.ca-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "dkr-us-east-1" : { "credentialScope" : { @@ -799,49 +877,81 @@ "credentialScope" : { "region" : "eu-central-1" }, - "hostname" : "api.ecr.eu-central-1.amazonaws.com" + "hostname" : "api.ecr.eu-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-central-2" : { "credentialScope" : { "region" : "eu-central-2" }, - "hostname" : "api.ecr.eu-central-2.amazonaws.com" + "hostname" : "api.ecr.eu-central-2.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-north-1" : { "credentialScope" : { "region" : "eu-north-1" }, - "hostname" : "api.ecr.eu-north-1.amazonaws.com" + "hostname" : "api.ecr.eu-north-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-south-1" : { "credentialScope" : { "region" : "eu-south-1" }, - "hostname" : "api.ecr.eu-south-1.amazonaws.com" + 
"hostname" : "api.ecr.eu-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-south-2" : { "credentialScope" : { "region" : "eu-south-2" }, - "hostname" : "api.ecr.eu-south-2.amazonaws.com" + "hostname" : "api.ecr.eu-south-2.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-1" : { "credentialScope" : { "region" : "eu-west-1" }, - "hostname" : "api.ecr.eu-west-1.amazonaws.com" + "hostname" : "api.ecr.eu-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-2" : { "credentialScope" : { "region" : "eu-west-2" }, - "hostname" : "api.ecr.eu-west-2.amazonaws.com" + "hostname" : "api.ecr.eu-west-2.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-3" : { "credentialScope" : { "region" : "eu-west-3" }, - "hostname" : "api.ecr.eu-west-3.amazonaws.com" + "hostname" : "api.ecr.eu-west-3.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] }, "fips-dkr-us-east-1" : { "credentialScope" : { @@ -903,25 +1013,51 @@ "credentialScope" : { "region" : "il-central-1" }, - "hostname" : "api.ecr.il-central-1.amazonaws.com" + "hostname" : "api.ecr.il-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "me-central-1" : { "credentialScope" : { "region" : "me-central-1" }, - "hostname" : "api.ecr.me-central-1.amazonaws.com" + "hostname" : "api.ecr.me-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "me-south-1" : { "credentialScope" : { "region" : "me-south-1" }, - "hostname" : "api.ecr.me-south-1.amazonaws.com" + "hostname" : "api.ecr.me-south-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "mx-central-1" : { + "credentialScope" : { + "region" : "mx-central-1" + }, + "hostname" : "api.ecr.mx-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" }, - "hostname" : "api.ecr.sa-east-1.amazonaws.com" + "hostname" : "api.ecr.sa-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "us-east-1" : { "credentialScope" : { @@ -931,6 +1067,12 @@ "variants" : [ { "hostname" : "ecr-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ecr-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "ecr.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { @@ -941,6 +1083,12 @@ "variants" : [ { "hostname" : "ecr-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ecr-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "ecr.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { @@ -951,6 +1099,12 @@ "variants" : [ { "hostname" : "ecr-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ecr-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "ecr.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { @@ -961,6 +1115,12 @@ "variants" : [ { "hostname" : "ecr-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + 
}, { + "hostname" : "ecr-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "ecr.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -971,7 +1131,11 @@ "credentialScope" : { "region" : "us-east-1" }, - "hostname" : "api.ecr-public.us-east-1.amazonaws.com" + "hostname" : "api.ecr-public.us-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "ecr-public.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "us-west-2" : { "credentialScope" : { @@ -1267,28 +1431,98 @@ "api.tunneling.iot" : { "defaults" : { "variants" : [ { + "dnsSuffix" : "amazonaws.com", "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", "tags" : [ "fips" ] + }, { + "dnsSuffix" : "api.aws", + "hostname" : "api.iot-tunneling-fips.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "dnsSuffix" : "api.aws", + "hostname" : "api.iot-tunneling.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] } ] }, "endpoints" : { - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] } ] }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -1324,29 +1558,68 @@ "deprecated" : true, "hostname" : "api.tunneling.iot-fips.us-west-2.amazonaws.com" }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "me-central-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ 
{ + "hostname" : "api.iot-tunneling.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-east-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] } ] }, "us-east-2" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-east-2.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] } ] }, "us-west-1" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-west-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] } ] }, "us-west-2" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-west-2.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] } ] @@ -1367,6 +1640,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "apigateway-fips.ca-central-1.amazonaws.com", @@ -1432,6 +1706,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -1487,6 +1762,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -1500,6 +1776,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1521,6 +1798,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -1534,6 +1812,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1626,6 +1905,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -1639,6 +1919,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2141,6 +2422,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -2154,6 +2436,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2504,6 +2787,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "autoscaling-fips.ca-central-1.amazonaws.com", @@ -2569,6 +2853,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -2639,6 +2924,7 @@ "ap-southeast-3" : { }, 
"ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -2652,6 +2938,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -2704,6 +2991,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -2745,6 +3033,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -3702,6 +3991,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -3715,6 +4005,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -3981,6 +4272,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -4022,6 +4314,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -4292,6 +4585,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -4305,6 +4599,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -4741,35 +5036,140 @@ }, "cognito-idp" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "fips-us-east-1" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "deprecated" : true, - "hostname" : "cognito-idp-fips.us-east-1.amazonaws.com" + "af-south-1" : { + "variants" : [ { + "hostname" : "cognito-idp.af-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] }, - "fips-us-east-2" : { + "ap-east-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-northeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-northeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-northeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-south-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-southeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : 
"cognito-idp.ap-southeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-southeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-southeast-4.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ca-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ca-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-central-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-north-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-south-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-west-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-west-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "cognito-idp-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { "credentialScope" : { "region" : "us-east-2" }, @@ -4790,32 +5190,76 @@ "deprecated" : true, "hostname" : "cognito-idp-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "cognito-idp.il-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "cognito-idp.me-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "cognito-idp.me-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "cognito-idp.sa-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { + "hostname" : "cognito-idp-fips.us-east-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-idp-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-idp.us-east-1.amazonaws.com", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { + "hostname" : "cognito-idp-fips.us-east-2.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-idp-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-idp.us-east-2.amazonaws.com", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { + "hostname" : "cognito-idp-fips.us-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-idp-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-idp.us-west-1.amazonaws.com", + "tags" : [ "dualstack" ] } ] }, 
"us-west-2" : { "variants" : [ { + "hostname" : "cognito-idp-fips.us-west-2.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-idp-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-idp.us-west-2.amazonaws.com", + "tags" : [ "dualstack" ] } ] } } @@ -5135,6 +5579,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -5176,6 +5621,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -5210,9 +5656,21 @@ "ap-northeast-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "connect-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-west-2" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "connect-fips.ca-central-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -5780,106 +6238,274 @@ }, "datasync" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, - "ca-central-1" : { + "af-south-1" : { "variants" : [ { - "hostname" : "datasync-fips.ca-central-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "datasync.af-south-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "ca-west-1" : { + "ap-east-1" : { "variants" : [ { - "hostname" : "datasync-fips.ca-west-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "datasync.ap-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "fips-ca-central-1" : { - "credentialScope" : { - "region" : "ca-central-1" - }, - "deprecated" : true, - "hostname" : "datasync-fips.ca-central-1.amazonaws.com" - }, - "fips-ca-west-1" : { - "credentialScope" : { - "region" : "ca-west-1" - }, - "deprecated" : true, - "hostname" : "datasync-fips.ca-west-1.amazonaws.com" - }, - "fips-us-east-1" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "deprecated" : true, - "hostname" : "datasync-fips.us-east-1.amazonaws.com" - }, - "fips-us-east-2" : { - "credentialScope" : { - "region" : "us-east-2" - }, - "deprecated" : true, - "hostname" : "datasync-fips.us-east-2.amazonaws.com" - }, - "fips-us-west-1" : { - "credentialScope" : { - "region" : "us-west-1" - }, - "deprecated" : true, - "hostname" : "datasync-fips.us-west-1.amazonaws.com" - }, - "fips-us-west-2" : { - "credentialScope" : { - "region" : "us-west-2" - }, - "deprecated" : true, - "hostname" : "datasync-fips.us-west-2.amazonaws.com" - }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { + "ap-northeast-1" : { "variants" : [ { - "hostname" : "datasync-fips.us-east-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "datasync.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "us-east-2" : { + "ap-northeast-2" : { "variants" : [ { - "hostname" : "datasync-fips.us-east-2.amazonaws.com", - "tags" : [ "fips" ] + 
"hostname" : "datasync.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] } ] }, - "us-west-1" : { + "ap-northeast-3" : { "variants" : [ { - "hostname" : "datasync-fips.us-west-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "datasync.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "us-west-2" : { + "ap-south-1" : { + "variants" : [ { + "hostname" : "datasync.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "datasync.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "datasync-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "datasync-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "datasync.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "datasync.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "datasync.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "datasync.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "datasync.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "datasync.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "datasync.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "datasync.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "datasync-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "datasync-fips.ca-west-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "datasync-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : 
"datasync-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "datasync-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "datasync-fips.us-west-2.amazonaws.com" + }, + "il-central-1" : { + "variants" : [ { + "hostname" : "datasync.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "datasync.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "datasync.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "datasync.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "datasync.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "datasync-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "datasync-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "datasync-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { "variants" : [ { "hostname" : "datasync-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -5921,6 +6547,9 @@ "ap-southeast-5" : { "hostname" : "datazone.ap-southeast-5.api.aws" }, + "ap-southeast-7" : { + "hostname" : "datazone.ap-southeast-7.api.aws" + }, "ca-central-1" : { "hostname" : "datazone.ca-central-1.api.aws", "variants" : [ { @@ -6108,6 +6737,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "directconnect-fips.ca-central-1.amazonaws.com", @@ -6173,6 +6803,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -6285,6 +6916,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "dlm.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "dlm-fips.ca-central-1.api.aws", @@ -6369,6 +7006,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "dlm.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "dlm.sa-east-1.api.aws", @@ -6427,6 +7070,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "dms" : { @@ -6457,6 +7101,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" 
: { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -6803,6 +7448,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "dynamodb-fips.ca-central-1.amazonaws.com", @@ -6847,6 +7493,7 @@ }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -6916,6 +7563,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "ebs-fips.ca-central-1.amazonaws.com", @@ -6981,6 +7629,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -7060,6 +7709,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "ec2-fips.ca-central-1.amazonaws.com", @@ -7163,6 +7813,7 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { }, "sa-east-1" : { "variants" : [ { "hostname" : "ec2.sa-east-1.api.aws", @@ -7221,6 +7872,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -7262,6 +7914,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -7320,6 +7973,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -7361,6 +8015,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -7434,6 +8089,9 @@ "ap-southeast-5" : { "hostname" : "eks-auth.ap-southeast-5.api.aws" }, + "ap-southeast-7" : { + "hostname" : "eks-auth.ap-southeast-7.api.aws" + }, "ca-central-1" : { "hostname" : "eks-auth.ca-central-1.api.aws" }, @@ -7473,6 +8131,9 @@ "me-south-1" : { "hostname" : "eks-auth.me-south-1.api.aws" }, + "mx-central-1" : { + "hostname" : "eks-auth.mx-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "eks-auth.sa-east-1.api.aws" }, @@ -7504,6 +8165,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -7524,6 +8186,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -7847,6 +8510,12 @@ "tags" : [ "fips" ] } ] }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-southeast-7.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.ca-central-1.amazonaws.com", @@ -7991,6 +8660,13 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.ap-southeast-5.amazonaws.com" }, + "fips-ap-southeast-7" : { + "credentialScope" : { + "region" : "ap-southeast-7" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-southeast-7.amazonaws.com" + }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -8082,6 +8758,13 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.me-south-1.amazonaws.com" }, + "fips-mx-central-1" : { + "credentialScope" : { + "region" : "mx-central-1" + }, + "deprecated" : true, + "hostname" : 
"elasticfilesystem-fips.mx-central-1.amazonaws.com" + }, "fips-sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" @@ -8135,6 +8818,12 @@ "tags" : [ "fips" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.mx-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.sa-east-1.amazonaws.com", @@ -8184,6 +8873,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -8225,6 +8915,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -8270,6 +8961,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "elasticmapreduce-fips.ca-central-1.amazonaws.com", @@ -8337,6 +9029,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "sslCommonName" : "{service}.{region}.{dnsSuffix}", @@ -8729,6 +9422,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "aos.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "aos.ca-central-1.api.aws", @@ -8814,6 +9513,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "aos.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "aos.sa-east-1.api.aws", @@ -8900,6 +9605,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -8941,6 +9647,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -9653,6 +10360,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "fsx-fips.ca-central-1.amazonaws.com", @@ -9878,6 +10586,7 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, @@ -9991,28 +10700,138 @@ }, "glue" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "glue.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "glue.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "glue.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "glue.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : 
"glue.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "glue.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "glue.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "glue.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "glue.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "glue.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "glue.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "glue.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "glue.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "glue.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "glue.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "glue.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "glue.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "glue.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "glue.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "glue.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "glue.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "glue.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -10041,32 +10860,76 @@ "deprecated" : true, "hostname" : "glue-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "glue.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "glue.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "glue.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "glue.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "glue-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "glue-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "glue.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "glue-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "glue-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "glue.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "glue-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "glue-fips.us-west-1.api.aws", 
+ "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "glue.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "glue-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "glue-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "glue.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -10271,6 +11134,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -10457,6 +11321,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -10783,6 +11648,9 @@ "ap-southeast-5" : { "hostname" : "internetmonitor.ap-southeast-5.api.aws" }, + "ap-southeast-7" : { + "hostname" : "internetmonitor.ap-southeast-7.api.aws" + }, "ca-central-1" : { "hostname" : "internetmonitor.ca-central-1.api.aws", "variants" : [ { @@ -10872,6 +11740,9 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "hostname" : "internetmonitor.mx-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "internetmonitor.sa-east-1.api.aws", "variants" : [ { @@ -11199,9 +12070,21 @@ }, "iotfleetwise" : { "endpoints" : { - "ap-south-1" : { }, - "eu-central-1" : { }, - "us-east-1" : { } + "ap-south-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + } } }, "iotsecuredtunneling" : { @@ -11619,6 +12502,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "kafka-fips.ca-central-1.amazonaws.com", @@ -11839,6 +12723,9 @@ "ap-southeast-5" : { "hostname" : "kendra-ranking.ap-southeast-5.api.aws" }, + "ap-southeast-7" : { + "hostname" : "kendra-ranking.ap-southeast-7.api.aws" + }, "ca-central-1" : { "hostname" : "kendra-ranking.ca-central-1.api.aws", "variants" : [ { @@ -11876,6 +12763,9 @@ "me-south-1" : { "hostname" : "kendra-ranking.me-south-1.api.aws" }, + "mx-central-1" : { + "hostname" : "kendra-ranking.mx-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "kendra-ranking.sa-east-1.api.aws" }, @@ -11919,6 +12809,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -11960,6 +12851,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -12278,6 +13170,19 @@ "deprecated" : true, "hostname" : "kms-fips.ap-southeast-5.amazonaws.com" }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "kms-fips.ap-southeast-7.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ap-southeast-7-fips" : { + "credentialScope" : { + "region" : "ap-southeast-7" + }, + "deprecated" : true, + "hostname" : "kms-fips.ap-southeast-7.amazonaws.com" + }, "ca-central-1" : { "variants" : [ { "hostname" : "kms-fips.ca-central-1.amazonaws.com", @@ -12447,6 +13352,19 @@ "deprecated" : true, "hostname" : "kms-fips.me-south-1.amazonaws.com" }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "kms-fips.mx-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "mx-central-1-fips" : { + "credentialScope" : { + "region" : "mx-central-1" + }, + "deprecated" : true, + "hostname" : 
"kms-fips.mx-central-1.amazonaws.com" + }, "sa-east-1" : { "variants" : [ { "hostname" : "kms-fips.sa-east-1.amazonaws.com", @@ -12824,6 +13742,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "lambda.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "lambda.ca-central-1.api.aws", @@ -12930,6 +13854,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "lambda.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "lambda.sa-east-1.api.aws", @@ -12987,6 +13917,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -13310,6 +14241,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "logs.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "logs-fips.ca-central-1.amazonaws.com", @@ -13436,6 +14373,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "logs.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "logs.sa-east-1.api.aws", @@ -13579,41 +14522,116 @@ }, "macie2" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "fips-us-east-1" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "deprecated" : true, - "hostname" : "macie2-fips.us-east-1.amazonaws.com" + "af-south-1" : { + "variants" : [ { + "hostname" : "macie2.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, - "fips-us-east-2" : { - "credentialScope" : { - "region" : "us-east-2" - }, - "deprecated" : true, - "hostname" : "macie2-fips.us-east-2.amazonaws.com" + "ap-east-1" : { + "variants" : [ { + "hostname" : "macie2.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, - "fips-us-west-1" : { - "credentialScope" : { - "region" : "us-west-1" - }, - "deprecated" : true, - "hostname" : "macie2-fips.us-west-1.amazonaws.com" + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "macie2.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "macie2.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "macie2.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "macie2.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "macie2.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "macie2.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "macie2.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "macie2.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "macie2.eu-north-1.api.aws", + "tags" 
: [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "macie2.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "macie2.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "macie2.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "macie2.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "macie2-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "macie2-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "macie2-fips.us-west-1.amazonaws.com" }, "fips-us-west-2" : { "credentialScope" : { @@ -13622,31 +14640,70 @@ "deprecated" : true, "hostname" : "macie2-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "macie2.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "macie2.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "macie2.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "macie2-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "macie2-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "macie2.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "macie2-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "macie2-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "macie2.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "macie2-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "macie2-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "macie2.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "macie2-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "macie2-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "macie2.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -13737,25 +14794,96 @@ }, "mediaconvert" : { "endpoints" : { - "af-south-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-4" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "mediaconvert.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-south-1.api.aws", + "tags" 
: [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "mediaconvert-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "mediaconvert.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "mediaconvert.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "mediaconvert.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "mediaconvert.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "mediaconvert.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "mediaconvert.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -13791,30 +14919,64 @@ "deprecated" : true, "hostname" : "mediaconvert-fips.us-west-2.amazonaws.com" }, - "me-central-1" : { }, - "sa-east-1" : { }, + "me-central-1" : { + "variants" : [ { + "hostname" : "mediaconvert.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "mediaconvert.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "mediaconvert-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "mediaconvert.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "mediaconvert-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "mediaconvert.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "mediaconvert-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "mediaconvert.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "mediaconvert-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "mediaconvert.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -14110,6 +15272,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "metrics-fips.sagemaker.ca-central-1.amazonaws.com", @@ -14397,6 +15560,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, 
@@ -14438,6 +15602,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -14831,6 +15996,9 @@ "ap-southeast-5" : { "hostname" : "notifications.ap-southeast-5.api.aws" }, + "ap-southeast-7" : { + "hostname" : "notifications.ap-southeast-7.api.aws" + }, "ca-central-1" : { "hostname" : "notifications.ca-central-1.api.aws" }, @@ -14870,6 +16038,9 @@ "me-south-1" : { "hostname" : "notifications.me-south-1.api.aws" }, + "mx-central-1" : { + "hostname" : "notifications.mx-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "notifications.sa-east-1.api.aws" }, @@ -14913,6 +16084,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -14926,6 +16098,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -15200,15 +16373,9 @@ }, "opsworks-cm" : { "endpoints" : { - "ap-northeast-1" : { }, - "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "eu-central-1" : { }, "eu-west-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { } } }, "organizations" : { @@ -15481,6 +16648,13 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-7" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "protocols" : [ "https" ], "variants" : [ { @@ -15626,6 +16800,13 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "protocols" : [ "https" ], + "variants" : [ { + "hostname" : "pi.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "protocols" : [ "https" ], "variants" : [ { @@ -16213,17 +17394,6 @@ } } }, - "projects.iot1click" : { - "endpoints" : { - "ap-northeast-1" : { }, - "eu-central-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-2" : { } - } - }, "proton" : { "endpoints" : { "ap-northeast-1" : { }, @@ -16285,6 +17455,9 @@ "ap-southeast-5" : { "hostname" : "qbusiness.ap-southeast-5.api.aws" }, + "ap-southeast-7" : { + "hostname" : "qbusiness.ap-southeast-7.api.aws" + }, "ca-central-1" : { "hostname" : "qbusiness.ca-central-1.api.aws" }, @@ -16324,6 +17497,9 @@ "me-south-1" : { "hostname" : "qbusiness.me-south-1.api.aws" }, + "mx-central-1" : { + "hostname" : "qbusiness.mx-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "qbusiness.sa-east-1.api.aws" }, @@ -16442,6 +17618,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "ram-fips.ca-central-1.amazonaws.com", @@ -16507,6 +17684,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -16536,38 +17714,151 @@ }, "rbin" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "rbin.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "rbin.ap-east-1.api.aws", + "tags" : [ 
"dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "rbin.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "rbin.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "rbin.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "rbin.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "rbin.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "rbin.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "rbin.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "rbin.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "rbin.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "rbin.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "rbin-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.ca-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" : [ { "hostname" : "rbin-fips.ca-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "rbin.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "rbin.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "rbin.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "rbin.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "rbin.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "rbin.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "rbin.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "rbin.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -16610,32 +17901,77 @@ "deprecated" : true, "hostname" : "rbin-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "rbin.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "rbin.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : 
"rbin.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "mx-central-1" : { }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "rbin.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "rbin-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "rbin-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "rbin-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "rbin-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -16654,6 +17990,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "rds-fips.ca-central-1.amazonaws.com", @@ -16691,6 +18028,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "rds-fips.ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -16929,6 +18267,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "redshift-fips.ca-central-1.amazonaws.com", @@ -16994,6 +18333,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -17273,87 +18613,187 @@ }, "resiliencehub" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } - } - }, - "resource-explorer-2" : { - "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ca-central-1" : { + "af-south-1" : { "variants" : [ { - "hostname" : "resource-explorer-2-fips.ca-central-1.amazonaws.com", - "tags" : [ "fips" ] - }, { - "hostname" : "resource-explorer-2-fips.ca-central-1.api.aws", - "tags" : [ "dualstack", "fips" ] + "hostname" : "resiliencehub.af-south-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "ca-west-1" : { + "ap-east-1" : { "variants" : [ { - "hostname" : "resource-explorer-2-fips.ca-west-1.amazonaws.com", - "tags" : [ "fips" ] - }, { - "hostname" : "resource-explorer-2-fips.ca-west-1.api.aws", - "tags" : [ "dualstack", "fips" ] + "hostname" : "resiliencehub.ap-east-1.api.aws", + 
"tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "fips-ca-central-1" : { - "credentialScope" : { - "region" : "ca-central-1" - }, - "deprecated" : true, - "hostname" : "resource-explorer-2-fips.ca-central-1.amazonaws.com" - }, - "fips-ca-west-1" : { - "credentialScope" : { - "region" : "ca-west-1" - }, - "deprecated" : true, - "hostname" : "resource-explorer-2-fips.ca-west-1.amazonaws.com" + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "resiliencehub.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, - "fips-us-east-1" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "deprecated" : true, - "hostname" : "resource-explorer-2-fips.us-east-1.amazonaws.com" + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "resiliencehub.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "resiliencehub.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "resiliencehub.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "resiliencehub.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "resiliencehub.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "resiliencehub.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "resiliencehub.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "resiliencehub.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "resiliencehub.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "resiliencehub.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "resiliencehub.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "resiliencehub.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "resiliencehub.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "resiliencehub.us-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "resiliencehub.us-east-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "resiliencehub.us-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "resiliencehub.us-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + } + } + }, + "resource-explorer-2" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-south-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "resource-explorer-2-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "resource-explorer-2-fips.ca-central-1.api.aws", 
+ "tags" : [ "dualstack", "fips" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "resource-explorer-2-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "resource-explorer-2-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + } ] + }, + "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "resource-explorer-2-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "resource-explorer-2-fips.ca-west-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "resource-explorer-2-fips.us-east-1.amazonaws.com" }, "fips-us-east-2" : { "credentialScope" : { @@ -17432,6 +18872,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -17473,6 +18914,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -17524,6 +18966,8 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -17565,6 +19009,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -17647,6 +19092,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "route53resolver-fips.ca-central-1.amazonaws.com", @@ -17684,6 +19130,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -18009,6 +19456,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "s3.dualstack.ap-southeast-7.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, "aws-global" : { "credentialScope" : { "region" : "us-east-1" @@ -18150,6 +19603,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.mx-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, "s3-external-1" : { "credentialScope" : { "region" : "us-east-1" @@ -18954,6 +20413,11 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-7" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -19033,6 +20497,11 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -19744,6 +21213,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "servicediscovery.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "servicediscovery-fips.ca-central-1.amazonaws.com", @@ -19848,6 +21323,12 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "servicediscovery.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { 
"variants" : [ { "hostname" : "servicediscovery.sa-east-1.api.aws", @@ -19949,6 +21430,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -19962,6 +21444,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -20386,80 +21869,215 @@ }, "snowball" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "snowball-fips.af-south-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.af-south-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "snowball-fips.ap-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ap-northeast-1" : { "variants" : [ { "hostname" : "snowball-fips.ap-northeast-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-northeast-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ap-northeast-2" : { "variants" : [ { "hostname" : "snowball-fips.ap-northeast-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-northeast-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "ap-northeast-3" : { "variants" : [ { "hostname" : "snowball-fips.ap-northeast-3.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-northeast-3.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] } ] }, "ap-south-1" : { "variants" : [ { "hostname" : "snowball-fips.ap-south-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-south-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-south-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ap-southeast-1" : { "variants" : [ { "hostname" : "snowball-fips.ap-southeast-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-southeast-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ap-southeast-2" : { "variants" : [ { "hostname" : "snowball-fips.ap-southeast-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-southeast-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "snowball-fips.ap-southeast-3.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ap-southeast-3.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "ap-southeast-3" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "snowball-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.ca-central-1.api.aws", + "tags" : 
[ "dualstack" ] } ] }, "eu-central-1" : { "variants" : [ { "hostname" : "snowball-fips.eu-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.eu-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-west-1" : { + "eu-north-1" : { "variants" : [ { - "hostname" : "snowball-fips.eu-west-1.amazonaws.com", + "hostname" : "snowball-fips.eu-north-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.eu-north-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-north-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-west-2" : { + "eu-south-1" : { "variants" : [ { - "hostname" : "snowball-fips.eu-west-2.amazonaws.com", + "hostname" : "snowball-fips.eu-south-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.eu-south-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-south-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-west-3" : { + "eu-west-1" : { "variants" : [ { - "hostname" : "snowball-fips.eu-west-3.amazonaws.com", + "hostname" : "snowball-fips.eu-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.eu-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "fips-ap-northeast-1" : { - "credentialScope" : { - "region" : "ap-northeast-1" + "eu-west-2" : { + "variants" : [ { + "hostname" : "snowball-fips.eu-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.eu-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "snowball-fips.eu-west-3.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.eu-west-3.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "fips-af-south-1" : { + "credentialScope" : { + "region" : "af-south-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.af-south-1.amazonaws.com" + }, + "fips-ap-east-1" : { + "credentialScope" : { + "region" : "ap-east-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.ap-east-1.amazonaws.com" + }, + "fips-ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" }, "deprecated" : true, "hostname" : "snowball-fips.ap-northeast-1.amazonaws.com" @@ -20499,6 +22117,13 @@ "deprecated" : true, "hostname" : "snowball-fips.ap-southeast-2.amazonaws.com" }, + "fips-ap-southeast-3" : { + "credentialScope" : { + "region" : "ap-southeast-3" + }, + "deprecated" : true, + "hostname" : "snowball-fips.ap-southeast-3.amazonaws.com" + }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -20513,6 +22138,20 @@ "deprecated" : true, "hostname" : "snowball-fips.eu-central-1.amazonaws.com" }, + "fips-eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.eu-north-1.amazonaws.com" + }, + "fips-eu-south-1" : { + "credentialScope" : { + "region" : "eu-south-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.eu-south-1.amazonaws.com" + }, "fips-eu-west-1" : { "credentialScope" : { "region" : "eu-west-1" @@ -20534,6 +22173,20 @@ "deprecated" : true, "hostname" : 
"snowball-fips.eu-west-3.amazonaws.com" }, + "fips-il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.il-central-1.amazonaws.com" + }, + "fips-me-central-1" : { + "credentialScope" : { + "region" : "me-central-1" + }, + "deprecated" : true, + "hostname" : "snowball-fips.me-central-1.amazonaws.com" + }, "fips-sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" @@ -20569,36 +22222,88 @@ "deprecated" : true, "hostname" : "snowball-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "snowball-fips.il-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.il-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "snowball-fips.me-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.me-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "sa-east-1" : { "variants" : [ { "hostname" : "snowball-fips.sa-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.sa-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.sa-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-1" : { "variants" : [ { "hostname" : "snowball-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "snowball-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "snowball-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "snowball-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "snowball-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "snowball.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -20620,6 +22325,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { "variants" : [ { @@ -20673,6 +22379,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -20718,6 +22425,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -20759,6 +22467,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "sslCommonName" : "queue.{dnsSuffix}", @@ -20801,6 +22510,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : 
"ssm-fips.ca-central-1.amazonaws.com", @@ -20866,6 +22576,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -21123,31 +22834,132 @@ }, "ssm-sap" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "ssm-sap.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "ssm-sap.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "ssm-sap.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "ssm-sap.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "ssm-sap.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "ssm-sap.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "ssm-sap.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "ssm-sap.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "ssm-sap.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "ssm-sap.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "ssm-sap.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "ssm-sap-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ssm-sap-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "ssm-sap.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "ssm-sap.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "ssm-sap.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "ssm-sap.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "ssm-sap.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "ssm-sap.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "ssm-sap.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "ssm-sap.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "ssm-sap.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -21183,32 +22995,76 @@ "deprecated" : true, "hostname" : "ssm-sap-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { 
}, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "ssm-sap.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "ssm-sap.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "ssm-sap.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "ssm-sap.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "ssm-sap-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ssm-sap-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "ssm-sap.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "ssm-sap-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ssm-sap-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "ssm-sap.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "ssm-sap-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ssm-sap-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "ssm-sap.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "ssm-sap-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ssm-sap-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "ssm-sap.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -21260,6 +23116,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -21301,6 +23158,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -21454,6 +23312,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -21474,6 +23333,7 @@ }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -21495,6 +23355,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "aws-global" : { "credentialScope" : { "region" : "us-east-1" @@ -21514,6 +23375,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -21602,6 +23464,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "swf-fips.ca-central-1.amazonaws.com", @@ -21667,6 +23530,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -21696,38 +23560,150 @@ }, "synthetics" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "synthetics.af-south-1.api.aws", + "tags" : [ 
"dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "synthetics.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "synthetics.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "synthetics.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "synthetics.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "synthetics.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "synthetics.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-7" : { + "variants" : [ { + "hostname" : "synthetics.ap-southeast-7.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { - "hostname" : "synthetics-fips.ca-central-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "synthetics-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "synthetics.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "synthetics-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "synthetics.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "synthetics.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "synthetics.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "synthetics.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "synthetics.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "synthetics.eu-south-2.api.aws", + "tags" : [ "dualstack" ] } ] }, - "ca-west-1" : { + "eu-west-1" : { "variants" : [ { - "hostname" : "synthetics-fips.ca-west-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "synthetics.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "synthetics.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "synthetics.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -21770,32 +23746,70 @@ "deprecated" : true, "hostname" : "synthetics-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - 
"me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "synthetics.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "synthetics.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "synthetics.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "mx-central-1" : { + "variants" : [ { + "hostname" : "synthetics.mx-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "synthetics.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "synthetics-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "synthetics-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "synthetics-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "synthetics-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -21814,6 +23828,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -21827,6 +23842,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -23851,6 +25867,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -23892,6 +25909,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -23997,13 +26015,21 @@ "credentialScope" : { "region" : "cn-north-1" }, - "hostname" : "api.ecr.cn-north-1.amazonaws.com.cn" + "hostname" : "api.ecr.cn-north-1.amazonaws.com.cn", + "variants" : [ { + "hostname" : "ecr.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] }, "cn-northwest-1" : { "credentialScope" : { "region" : "cn-northwest-1" }, - "hostname" : "api.ecr.cn-northwest-1.amazonaws.com.cn" + "hostname" : "api.ecr.cn-northwest-1.amazonaws.com.cn", + "variants" : [ { + "hostname" : "ecr.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] } } }, @@ -24024,9 +26050,34 @@ } }, "api.tunneling.iot" : { + "defaults" : { + "variants" : [ { + "dnsSuffix" : "amazonaws.com.cn", + "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + }, { + "dnsSuffix" : "api.amazonwebservices.com.cn", + "hostname" : "api.iot-tunneling-fips.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "dnsSuffix" : "api.amazonwebservices.com.cn", + "hostname" : "api.iot-tunneling.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + } ] + }, "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + 
"variants" : [ { + "hostname" : "api.iot-tunneling.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "apigateway" : { @@ -24309,8 +26360,18 @@ }, "datasync" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "datasync.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "datasync.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "datazone" : { @@ -24345,8 +26406,18 @@ }, "dlm" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "dlm.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "dlm.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "dms" : { @@ -24601,8 +26672,18 @@ }, "glue" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "glue.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "glue.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "greengrass" : { @@ -24836,10 +26917,10 @@ "mediaconvert" : { "endpoints" : { "cn-northwest-1" : { - "credentialScope" : { - "region" : "cn-northwest-1" - }, - "hostname" : "mediaconvert.cn-northwest-1.amazonaws.com.cn" + "variants" : [ { + "hostname" : "mediaconvert.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] } } }, @@ -25030,8 +27111,18 @@ }, "rbin" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "rbin.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "rbin.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "rds" : { @@ -25381,8 +27472,18 @@ }, "synthetics" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "synthetics.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "synthetics.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "tagging" : { @@ -25742,6 +27843,12 @@ "variants" : [ { "hostname" : "ecr-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ecr-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "ecr.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { @@ -25752,6 +27859,12 @@ "variants" : [ { "hostname" : "ecr-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ecr-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "ecr.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -25800,8 +27913,17 @@ "api.tunneling.iot" : { "defaults" : { "variants" : [ { + "dnsSuffix" : "amazonaws.com", "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", "tags" : [ "fips" ] + }, { + "dnsSuffix" : "api.aws", + "hostname" : "api.iot-tunneling-fips.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "dnsSuffix" : "api.aws", + "hostname" : 
"api.iot-tunneling.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] } ] }, "endpoints" : { @@ -25821,12 +27943,24 @@ }, "us-gov-east-1" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] } ] }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -26549,8 +28683,14 @@ }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "cognito-idp-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-idp-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-idp.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack" ] } ] } } @@ -26830,12 +28970,24 @@ "variants" : [ { "hostname" : "datasync-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "datasync-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -28609,6 +30761,12 @@ "variants" : [ { "hostname" : "mediaconvert.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + }, { + "hostname" : "mediaconvert.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] } } @@ -29163,12 +31321,24 @@ "variants" : [ { "hostname" : "rbin-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "rbin-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -29323,12 +31493,24 @@ "variants" : [ { "hostname" : "resiliencehub-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "resiliencehub-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "resiliencehub.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "resiliencehub-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "resiliencehub-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "resiliencehub.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -30903,6 +33085,24 @@ "us-iso-east-1" : { } } }, + "budgets" : { + "endpoints" : { + "aws-iso-global" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "budgets.c2s.ic.gov" + }, + "us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, 
+ "hostname" : "budgets.c2s.ic.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-global" + }, "cloudcontrolapi" : { "endpoints" : { "us-iso-east-1" : { }, @@ -30962,7 +33162,19 @@ "protocols" : [ "https" ] }, "endpoints" : { - "us-iso-east-1" : { } + "fips-us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "comprehend-fips.us-iso-east-1.c2s.ic.gov" + }, + "us-iso-east-1" : { + "variants" : [ { + "hostname" : "comprehend-fips.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + } } }, "config" : { @@ -31389,6 +33601,18 @@ "us-iso-east-1" : { } } }, + "organizations" : { + "endpoints" : { + "aws-iso-global" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "organizations.us-iso-east-1.c2s.ic.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-global" + }, "outposts" : { "endpoints" : { "us-iso-east-1" : { } diff --git a/models/fms.json b/models/fms.json index 2cb7332f6c..7158767a1f 100644 --- a/models/fms.json +++ b/models/fms.json @@ -6153,6 +6153,12 @@ "traits": { "smithy.api#documentation": "

Indicates whether the policy is in or out of an admin's policy or Region scope.
  • ACTIVE - The administrator can manage and delete the policy.
  • OUT_OF_ADMIN_SCOPE - The administrator can view the policy, but they can't edit or delete the policy. Existing policy protections stay in place. Any new resources that come into scope of the policy won't be protected.
" } + },

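Before going further into the model changes: the bulk of this diff, in the endpoints.json hunks above, adds dualstack and FIPS endpoint variants for services such as resiliencehub, resource-explorer-2, snowball, ssm-sap, and synthetics. In Soto those variants are selected per service through options rather than hand-built hostnames. A minimal sketch, assuming soto-core's .useDualStackEndpoint and .useFipsEndpoint service options and the generated SotoSnowball module:

```swift
import SotoCore
import SotoSnowball

// A minimal sketch, assuming soto-core's endpoint-variant options
// (.useDualStackEndpoint / .useFipsEndpoint). The "variants" entries added
// in endpoints.json above decide which hostname each combination resolves to.
let client = AWSClient(credentialProvider: .default)

// Expected to resolve to snowball.us-east-1.api.aws (the "dualstack" variant).
let snowballDualstack = Snowball(client: client, region: .useast1, options: .useDualStackEndpoint)

// Expected to resolve to snowball-fips.us-east-1.api.aws (the "dualstack","fips" variant).
let snowballFipsDualstack = Snowball(
    client: client,
    region: .useast1,
    options: [.useFipsEndpoint, .useDualStackEndpoint]
)
// Client shutdown (try await client.shutdown()) is omitted for brevity.
```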
+ "ResourceTagLogicalOperator": { + "target": "com.amazonaws.fms#ResourceTagLogicalOperator", + "traits": { + "smithy.api#documentation": "Specifies whether to combine multiple resource tags with AND, so that a resource must have all tags to be included or excluded, or OR, so that a resource must have at least one tag. Default: AND" + } } },

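The new ResourceTagLogicalOperator member above lets callers choose OR instead of the previously implicit AND when combining scope tags. A hedged sketch through Soto's generated FMS types; the member and case names (FMS.ResourceTag, .or) follow Soto's usual codegen conventions and are assumptions until checked against the generated SotoFMS module:

```swift
import SotoFMS

// Hypothetical policy-scope tags: with .or, a resource needs only one of
// these tags to fall in scope; the documented default stays AND.
let scopeTags: [FMS.ResourceTag] = [
    .init(key: "env", value: "prod"),
    .init(key: "team", value: "payments"),
]
let tagOperator: FMS.ResourceTagLogicalOperator = .or
// Both values would be set on the shape that gained the new member above,
// alongside its existing resource-tag list.
```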
"traits": { @@ -7407,7 +7413,7 @@ } }, "traits": { - "smithy.api#documentation": "The resource tags that Firewall Manager uses to determine if a particular resource should be included or excluded from the Firewall Manager policy. Tags enable you to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value. Firewall Manager combines the tags with \"AND\" so that, if you add more than one tag to a policy scope, a resource must have all the specified tags to be included or excluded. For more information, see Working with Tag Editor.
Every resource tag must have a string value, either a non-empty string or an empty string. If you don't provide a value for a resource tag, Firewall Manager saves the value as an empty string: \"\". When Firewall Manager compares tags, it only matches two tags if they have the same key and the same value. A tag with an empty string value only matches with tags that also have an empty string value.
" + "smithy.api#documentation": "

The resource tags that Firewall Manager uses to determine if a particular resource should be included or excluded from the Firewall Manager policy. Tags enable you to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value. If you add more than one tag to a policy, you can specify whether to combine them using the logical AND operator or the logical OR operator. For more information, see Working with Tag Editor.
Every resource tag must have a string value, either a non-empty string or an empty string. If you don't provide a value for a resource tag, Firewall Manager saves the value as an empty string: \"\". When Firewall Manager compares tags, it only matches two tags if they have the same key and the same value. A tag with an empty string value only matches with tags that also have an empty string value.
" } }, "com.amazonaws.fms#ResourceTagKey": { @@ -7417,7 +7423,24 @@ "min": 1, "max": 128 }, - "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@*\\\\]*)$" + } + }, + "com.amazonaws.fms#ResourceTagLogicalOperator": { + "type": "enum", + "members": { + "AND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AND" + } + }, + "OR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OR" + } + } } }, "com.amazonaws.fms#ResourceTagValue": { @@ -7427,7 +7450,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@*\\\\]*)$" } }, "com.amazonaws.fms#ResourceTags": { diff --git a/models/gamelift.json b/models/gamelift.json index 6f776de74c..e9f7ef65ee 100644 --- a/models/gamelift.json +++ b/models/gamelift.json @@ -285,7 +285,7 @@ } }, "traits": { - "smithy.api#documentation": "

Amazon Web Services account security credentials that allow interactions with Amazon GameLift resources. The credentials are temporary and valid for a limited time span. You can request fresh credentials at any time.
Amazon Web Services security credentials consist of three parts: an access key ID, a secret access key, and a session token. You must use all three parts together to authenticate your access requests.
You need Amazon Web Services credentials for the following tasks:
  • To upload a game server build directly to Amazon GameLift S3 storage using CreateBuild. To get access for this task, call RequestUploadCredentials.
  • To remotely connect to active Amazon GameLift fleet instances. To get remote access, call GetComputeAccess.
", + "smithy.api#documentation": "

Amazon Web Services account security credentials that allow interactions with Amazon GameLift resources. The credentials are temporary and valid for a limited time span. You can request fresh credentials at any time.
Amazon Web Services security credentials consist of three parts: an access key ID, a secret access key, and a session token. You must use all three parts together to authenticate your access requests.
You need Amazon Web Services credentials for the following tasks:
", "smithy.api#sensitive": {} } },

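Because AwsCredentials is only ever returned by other calls, a short usage sketch may help. It assumes Soto's generated async surface for RequestUploadCredentials and uses a placeholder build ID:

```swift
import SotoGameLift

// Sketch: fetch the temporary three-part credentials that the AwsCredentials
// shape documents. The build ID below is a placeholder.
func fetchUploadCredentials(_ gameLift: GameLift) async throws {
    let response = try await gameLift.requestUploadCredentials(
        .init(buildId: "build-1111aaaa-22bb-33cc-44dd-5555eeee66ff")
    )
    // All three parts (access key ID, secret access key, session token) must
    // be used together, and they expire after a limited time span.
    let credentials = response.uploadCredentials
    print("access key:", credentials?.accessKeyId ?? "none")
    print("upload bucket:", response.storageLocation?.bucket ?? "none")
}
```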
@@ -448,7 +448,7 @@ "target": "com.amazonaws.gamelift#CertificateType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "Indicates whether a TLS/SSL certificate is generated for a fleet.
Valid values include:
  • GENERATED -- Generate a TLS/SSL certificate for this fleet.
  • DISABLED -- (default) Do not generate a TLS/SSL certificate for this fleet.
", + "smithy.api#documentation": "

Indicates whether a TLS/SSL certificate is generated for a fleet.
Valid values include:
  • GENERATED - Generate a TLS/SSL certificate for this fleet.
  • DISABLED - (default) Do not generate a TLS/SSL certificate for this fleet.
", "smithy.api#required": {} } }

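On the GENERATED/DISABLED values documented above: in Soto this is a one-line configuration at fleet creation, and because DISABLED is the default the configuration can simply be omitted. A sketch, with the surrounding CreateFleetInput fields elided:

```swift
import SotoGameLift

// Opt in to an Amazon GameLift-generated TLS/SSL certificate for a new fleet.
// Omitting certificateConfiguration keeps the documented default (DISABLED).
let certificateConfiguration = GameLift.CertificateConfiguration(certificateType: .generated)
// e.g. pass it as CreateFleetInput(certificateConfiguration: certificateConfiguration, ...)
```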
@@ -846,7 +846,7 @@ } }, "traits": { - "smithy.api#documentation": "A unique identifier for a container in a container fleet compute.
Returned by: DescribeCompute
" + "smithy.api#documentation": "

A unique identifier for a container in a container fleet compute.
Returned by: DescribeCompute
" } }, "com.amazonaws.gamelift#ContainerAttributes": { @@ -894,7 +894,7 @@ } }, "traits": { - "smithy.api#documentation": "

A container's dependency on another container in the same container group. The dependency impacts how the dependent container is able to start or shut down based the status of the other container.
For example, ContainerA is configured with the following dependency: a START dependency on ContainerB. This means that ContainerA can't start until ContainerB has started. It also means that ContainerA must shut down before ContainerB.
eiifcbfhgrdurhnucnufkgbnbnnerrvbtjvljdetkehcPart of: GameServerContainerDefinition, GameServerContainerDefinitionInput, SupportContainerDefinition, SupportContainerDefinitionInput
" + "smithy.api#documentation": "

A container's dependency on another container in the same container group. The dependency impacts how the dependent container is able to start or shut down based on the status of the other container.
For example, ContainerA is configured with the following dependency: a START dependency on ContainerB. This means that ContainerA can't start until ContainerB has started. It also means that ContainerA must shut down before ContainerB.
Part of: GameServerContainerDefinition, GameServerContainerDefinitionInput, SupportContainerDefinition, SupportContainerDefinitionInput
" } },

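The ContainerA/ContainerB example above maps directly onto the shape. A sketch, with the condition/containerName members and the .start case assumed from Soto's codegen conventions:

```swift
import SotoGameLift

// ContainerA's dependency on ContainerB: ContainerA can't start until
// ContainerB has started, and must shut down before ContainerB does.
let dependsOnB = GameLift.ContainerDependency(
    condition: .start,
    containerName: "ContainerB"
)
// Attach it to ContainerA's container definition input alongside its other settings.
```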
"com.amazonaws.gamelift#ContainerDependencyCondition": { @@ -959,7 +959,7 @@ } }, "traits": { - "smithy.api#documentation": "An environment variable to set inside a container, in the form of a key-value pair.
Part of: GameServerContainerDefinition, GameServerContainerDefinitionInput, SupportContainerDefinition, SupportContainerDefinitionInput
" + "smithy.api#documentation": "

An environment variable to set inside a container, in the form of a key-value pair.
Part of: GameServerContainerDefinition, GameServerContainerDefinitionInput, SupportContainerDefinition, SupportContainerDefinitionInput
" } }, "com.amazonaws.gamelift#ContainerEnvironmentList": { @@ -990,7 +990,7 @@ } }, "FleetRoleArn": { - "target": "com.amazonaws.gamelift#ArnStringModel", + "target": "com.amazonaws.gamelift#IamRoleArn", "traits": { "smithy.api#documentation": "

The unique identifier for an Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift. See Set up an IAM service role. This fleet property can't be changed.

" } @@ -1362,12 +1362,12 @@ "StatusReason": { "target": "com.amazonaws.gamelift#NonZeroAndMaxString", "traits": { - "smithy.api#documentation": "

Additional information about a container group definition that's in FAILED status. Possible reasons include:
  • An internal issue prevented Amazon GameLift from creating the container group definition resource. Delete the failed resource and call CreateContainerGroupDefinition again.
  • An access-denied message means that you don't have permissions to access the container image on ECR. See IAM permission examples for help setting up required IAM permissions for Amazon GameLift.
  • The ImageUri value for at least one of the containers in the container group definition was invalid or not found in the current Amazon Web Services account.
  • At least one of the container images referenced in the container group definition exceeds the allowed size. For size limits, see Amazon GameLift endpoints and quotas.
  • At least one of the container images referenced in the container group definition uses a different operating system than the one defined for the container group.
" + "smithy.api#documentation": "

Additional information about a container group definition that's in FAILED status. Possible reasons include:
  • An internal issue prevented Amazon GameLift from creating the container group definition resource. Delete the failed resource and call CreateContainerGroupDefinition again.
  • An access-denied message means that you don't have permissions to access the container image on ECR. See IAM permission examples for help setting up required IAM permissions for Amazon GameLift.
  • The ImageUri value for at least one of the containers in the container group definition was invalid or not found in the current Amazon Web Services account.
  • At least one of the container images referenced in the container group definition exceeds the allowed size. For size limits, see Amazon GameLift endpoints and quotas.
  • At least one of the container images referenced in the container group definition uses a different operating system than the one defined for the container group.
" } } }, "traits": { - "smithy.api#documentation": "

The properties that describe a container group resource. You can update all properties of a container group definition properties. Updates to a container group definition are saved as new versions.
Used with: CreateContainerGroupDefinition
Returned by: DescribeContainerGroupDefinition, ListContainerGroupDefinitions, UpdateContainerGroupDefinition
" + "smithy.api#documentation": "

The properties that describe a container group resource. You can update all properties of a container group definition. Updates to a container group definition are saved as new versions.
Used with: CreateContainerGroupDefinition
Returned by: DescribeContainerGroupDefinition, ListContainerGroupDefinitions, UpdateContainerGroupDefinition
" } }, "com.amazonaws.gamelift#ContainerGroupDefinitionArn": { @@ -1483,7 +1483,7 @@ } }, "traits": { - "smithy.api#documentation": "

Instructions on when and how to check the health of a support container in a container fleet. These properties override any Docker health checks that are set in the container image. For more information on container health checks, see HealthCheck command in the Amazon Elastic Container Service API. Game server containers don't have a health check parameter; Amazon GameLift automatically handles health checks for these containers.
The following example instructs the container to initiate a health check command every 60 seconds and wait 10 seconds for it to succeed. If it fails, retry the command 3 times before flagging the container as unhealthy. It also tells the container to wait 100 seconds after launch before counting failed health checks.
{\"Command\": [ \"CMD-SHELL\", \"ps cax | grep \"processmanager\" || exit 1\" ], \"Interval\": 60, \"Timeout\": 10, \"Retries\": 3, \"StartPeriod\": 100 }
Part of: SupportContainerDefinition, SupportContainerDefinitionInput
" + "smithy.api#documentation": "

Instructions on when and how to check the health of a support container in a container fleet. These properties override any Docker health checks that are set in the container image. For more information on container health checks, see HealthCheck command in the Amazon Elastic Container Service API. Game server containers don't have a health check parameter; Amazon GameLift automatically handles health checks for these containers.
The following example instructs the container to initiate a health check command every 60 seconds and wait 10 seconds for it to succeed. If it fails, retry the command 3 times before flagging the container as unhealthy. It also tells the container to wait 100 seconds after launch before counting failed health checks.
{\"Command\": [ \"CMD-SHELL\", \"ps cax | grep \"processmanager\" || exit 1\" ], \"Interval\": 60, \"Timeout\": 10, \"Retries\": 3, \"StartPeriod\": 100 }
Part of: SupportContainerDefinition, SupportContainerDefinitionInput
" } },

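The JSON example embedded in the documentation above translates mechanically into the Soto shape; a sketch, assuming the usual codegen mapping of the documented keys to ContainerHealthCheck members:

```swift
import SotoGameLift

// The documented example: run the check every 60 seconds, allow 10 seconds
// per attempt, retry 3 times, and wait 100 seconds after launch before
// counting failures.
let healthCheck = GameLift.ContainerHealthCheck(
    command: ["CMD-SHELL", "ps cax | grep \"processmanager\" || exit 1"],
    interval: 60,
    retries: 3,
    startPeriod: 100,
    timeout: 10
)
```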
"com.amazonaws.gamelift#ContainerHealthCheckInterval": { @@ -1539,7 +1539,7 @@ } }, "traits": { - "smithy.api#documentation": "A unique identifier for a container in a compute on a managed container fleet instance. This information makes it possible to remotely connect to a specific container on a fleet instance.
Related to: ContainerAttribute
Use with: GetComputeAccess
" + "smithy.api#documentation": "

A unique identifier for a container in a compute on a managed container fleet instance. This information makes it possible to remotely connect to a specific container on a fleet instance.
Related to: ContainerAttribute
Use with: GetComputeAccess
" } }, "com.amazonaws.gamelift#ContainerIdentifierList": { @@ -1588,7 +1588,7 @@ } }, "traits": { - "smithy.api#documentation": "

A mount point that binds a container to a file or directory on the host system.
Part of: GameServerContainerDefinition, GameServerContainerDefinitionInput, SupportContainerDefinition, SupportContainerDefinitionInput
" + "smithy.api#documentation": "

A mount point that binds a container to a file or directory on the host system.
Part of: GameServerContainerDefinition, https://docs.aws.amazon.com/gamelift/latest/apireference/API_GameServerContainerDefinitionInput.html, SupportContainerDefinition, https://docs.aws.amazon.com/gamelift/latest/apireference/API_SupportContainerDefinitionInput.html
" } }, "com.amazonaws.gamelift#ContainerMountPointAccessLevel": { @@ -1654,7 +1654,7 @@ } }, "traits": { - "smithy.api#documentation": "

A set of port ranges that can be opened on the container. A process that's running in the container can bind to a port number, making it accessible to inbound traffic. Container ports map to a container fleet's connection ports.
Part of: GameServerContainerDefinition, GameServerContainerDefinitionInput, SupportContainerDefinition, SupportContainerDefinitionInput
" + "smithy.api#documentation": "

A set of port ranges that can be opened on the container. A process that's running in the container can bind to a port number, making it accessible to inbound traffic. Container ports map to a container fleet's connection ports.
Part of: GameServerContainerDefinition, GameServerContainerDefinitionInput, SupportContainerDefinition, SupportContainerDefinitionInput
" } }, "com.amazonaws.gamelift#ContainerPortRange": { @@ -1686,7 +1686,7 @@ } }, "traits": { - "smithy.api#documentation": "

A set of one or more port numbers that can be opened on the container.
Part of: ContainerPortConfiguration
" + "smithy.api#documentation": "

A set of one or more port numbers that can be opened on the container.
Part of: ContainerPortConfiguration
" } }, "com.amazonaws.gamelift#ContainerPortRangeList": { @@ -1836,7 +1836,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Amazon GameLift build resource for your game server software and stores the software for deployment to hosting resources. Combine game server binaries and dependencies into a single .zip file.
Use the CLI command upload-build to quickly and simply create a new build and upload your game build .zip file to Amazon GameLift Amazon S3. This helper command eliminates the need to explicitly manage access permissions.
Alternatively, use the CreateBuild action for the following scenarios:
  • You want to create a build and upload a game build zip file from an Amazon S3 location that you control. In this scenario, you need to give Amazon GameLift permission to access the Amazon S3 bucket. With permission in place, call CreateBuild and specify a build name, the build's runtime operating system, and the Amazon S3 storage location where the build file is stored.
  • You want to create a build and upload a local game build zip file to an Amazon S3 location that's controlled by Amazon GameLift. (See the upload-build CLI command for this scenario.) In this scenario, you need to request temporary access credentials to the Amazon GameLift Amazon S3 location. Specify a build name and the build's runtime operating system. The response provides an Amazon S3 location and a set of temporary access credentials. Use the credentials to upload your build files to the specified Amazon S3 location (see Uploading Objects in the Amazon S3 Developer Guide). You can't update build files after uploading them to Amazon GameLift Amazon S3.
If successful, this action creates a new build resource with a unique build ID and places it in INITIALIZED status. When the build reaches READY status, you can create fleets with it.
Learn more
Uploading Your Game
Create a Build with Files in Amazon S3
All APIs by task
" + "smithy.api#documentation": "

Creates a new Amazon GameLift build resource for your game server binary files. Combine game server binaries into a zip file for use with Amazon GameLift.
When setting up a new game build for Amazon GameLift, we recommend using the CLI command upload-build. This helper command combines two tasks: (1) it uploads your build files from a file directory to an Amazon GameLift Amazon S3 location, and (2) it creates a new build resource.
You can use the CreateBuild operation in the following scenarios:
  • Create a new game build with build files that are in an Amazon S3 location under an Amazon Web Services account that you control. To use this option, you give Amazon GameLift access to the Amazon S3 bucket. With permissions in place, specify a build name, operating system, and the Amazon S3 storage location of your game build.
  • Upload your build files to an Amazon GameLift Amazon S3 location. To use this option, specify a build name and operating system. This operation creates a new build resource and also returns an Amazon S3 location with temporary access credentials. Use the credentials to manually upload your build files to the specified Amazon S3 location. For more information, see Uploading Objects in the Amazon S3 Developer Guide. After you upload build files to the Amazon GameLift Amazon S3 location, you can't update them.
If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it.
Learn more
Uploading Your Game
Create a Build with Files in Amazon S3
All APIs by task
" } }, "com.amazonaws.gamelift#CreateBuildInput": { @@ -1863,7 +1863,7 @@ "OperatingSystem": { "target": "com.amazonaws.gamelift#OperatingSystem", "traits": { - "smithy.api#documentation": "

The environment that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. This parameter is required, and there's no default value. You can't change a build's operating system later.
Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5.
" + "smithy.api#documentation": "

The operating system that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. You must specify a valid operating system in this request. There is no default value. You can't change a build's operating system later.
Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5.
" } }, "Tags": { @@ -1948,7 +1948,7 @@ "type": "structure", "members": { "FleetRoleArn": { - "target": "com.amazonaws.gamelift#ArnStringModel", + "target": "com.amazonaws.gamelift#IamRoleArn", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "

The unique identifier for an Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift. Use an IAM service role with the GameLiftContainerFleetPolicy managed policy attached. For more information, see Set up an IAM service role. You can't change this fleet property after the fleet is created.
IAM role ARN values use the following pattern: arn:aws:iam::[Amazon Web Services account]:role/[role name].
", @@ -1964,13 +1964,13 @@ "GameServerContainerGroupDefinitionName": { "target": "com.amazonaws.gamelift#ContainerGroupDefinitionNameOrArn", "traits": { - "smithy.api#documentation": "

A container group definition resource that describes how to deploy containers with your game server build and support software onto each fleet instance. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number.

Create a container group definition by calling CreateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource.
" + "smithy.api#documentation": "
A container group definition resource that describes how to deploy containers with your game server build and support software onto each fleet instance. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number.

Create a container group definition by calling CreateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource.
" } }, "PerInstanceContainerGroupDefinitionName": { "target": "com.amazonaws.gamelift#ContainerGroupDefinitionNameOrArn", "traits": { - "smithy.api#documentation": "

The name of a container group definition resource that describes a set of auxiliary software. A fleet instance has one process for executables in this container group. A per-instance container group is optional. You can update the fleet to add or remove a per-instance container group at any time. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number.

Create a container group definition by calling CreateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource.
" + "smithy.api#documentation": "
The name of a container group definition resource that describes a set of auxiliary software. A fleet instance has one process for executables in this container group. A per-instance container group is optional. You can update the fleet to add or remove a per-instance container group at any time. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number.

Create a container group definition by calling https://docs.aws.amazon.com/gamelift/latest/apireference/API_CreateContainerGroupDefinition.html. This operation creates a https://docs.aws.amazon.com/gamelift/latest/apireference/API_ContainerGroupDefinition.html resource.
" } }, "InstanceConnectionPortRange": { @@ -2090,7 +2090,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a ContainerGroupDefinition that describes a set of containers for hosting your game server with Amazon GameLift managed containers hosting. An Amazon GameLift container group is similar to a container task or pod. Use container group definitions when you create a container fleet with CreateContainerFleet.

A container group definition determines how Amazon GameLift deploys your containers to each instance in a container fleet. You can maintain multiple versions of a container group definition.

There are two types of container groups:

  • A game server container group has the containers that run your game server application and supporting software. A game server container group can have these container types:
      • Game server container. This container runs your game server. You can define one game server container in a game server container group.
      • Support container. This container runs software in parallel with your game server. You can define up to 8 support containers in a game server group.
    When building a game server container group definition, you can choose to bundle your game server executable and all dependent software into a single game server container. Alternatively, you can separate the software into one game server container and one or more support containers.
    On a container fleet instance, a game server container group can be deployed multiple times (depending on the compute resources of the instance). This means that all containers in the container group are replicated together.
  • A per-instance container group has containers for processes that aren't replicated on a container fleet instance. This might include background services, logging, test processes, or processes that need to persist independently of the game server container group. When building a per-instance container group, you can define up to 10 support containers.

This operation requires Identity and Access Management (IAM) permissions to access container images in Amazon ECR repositories. See IAM permissions for Amazon GameLift for help setting the appropriate permissions.

Request options

Use this operation to make the following types of requests. You can specify values for the minimum required parameters and customize optional values later.

  • Create a game server container group definition. Provide the following required parameter values:
      • Name
      • ContainerGroupType (GAME_SERVER)
      • OperatingSystem (omit to use default value)
      • TotalMemoryLimitMebibytes (omit to use default value)
      • TotalVcpuLimit (omit to use default value)
      • At least one GameServerContainerDefinition
          • ContainerName
          • ImageUrl
          • PortConfiguration
          • ServerSdkVersion (omit to use default value)
  • Create a per-instance container group definition. Provide the following required parameter values:
      • Name
      • ContainerGroupType (PER_INSTANCE)
      • OperatingSystem (omit to use default value)
      • TotalMemoryLimitMebibytes (omit to use default value)
      • TotalVcpuLimit (omit to use default value)
      • At least one SupportContainerDefinition
          • ContainerName
          • ImageUrl

Results

If successful, this request creates a ContainerGroupDefinition resource and assigns a unique ARN value. You can update most properties of a container group definition by calling UpdateContainerGroupDefinition, and optionally save the update as a new version.

" + "smithy.api#documentation": "

Creates a ContainerGroupDefinition that describes a set of containers for hosting your game server with Amazon GameLift managed containers hosting. An Amazon GameLift container group is similar to a container task or pod. Use container group definitions when you create a container fleet with CreateContainerFleet.

A container group definition determines how Amazon GameLift deploys your containers to each instance in a container fleet. You can maintain multiple versions of a container group definition.

There are two types of container groups:

  • A game server container group has the containers that run your game server application and supporting software. A game server container group can have these container types:
      • Game server container. This container runs your game server. You can define one game server container in a game server container group.
      • Support container. This container runs software in parallel with your game server. You can define up to 8 support containers in a game server group.
    When building a game server container group definition, you can choose to bundle your game server executable and all dependent software into a single game server container. Alternatively, you can separate the software into one game server container and one or more support containers.
    On a container fleet instance, a game server container group can be deployed multiple times (depending on the compute resources of the instance). This means that all containers in the container group are replicated together.
  • A per-instance container group has containers for processes that aren't replicated on a container fleet instance. This might include background services, logging, test processes, or processes that need to persist independently of the game server container group. When building a per-instance container group, you can define up to 10 support containers.

This operation requires Identity and Access Management (IAM) permissions to access container images in Amazon ECR repositories. See IAM permissions for Amazon GameLift for help setting the appropriate permissions.

Request options

Use this operation to make the following types of requests. You can specify values for the minimum required parameters and customize optional values later.

  • Create a game server container group definition (a minimal request sketch follows this description). Provide the following required parameter values:
      • Name
      • ContainerGroupType (GAME_SERVER)
      • OperatingSystem (omit to use default value)
      • TotalMemoryLimitMebibytes (omit to use default value)
      • TotalVcpuLimit (omit to use default value)
      • At least one GameServerContainerDefinition
          • ContainerName
          • ImageUrl
          • PortConfiguration
          • ServerSdkVersion (omit to use default value)
  • Create a per-instance container group definition. Provide the following required parameter values:
      • Name
      • ContainerGroupType (PER_INSTANCE)
      • OperatingSystem (omit to use default value)
      • TotalMemoryLimitMebibytes (omit to use default value)
      • TotalVcpuLimit (omit to use default value)
      • At least one SupportContainerDefinition
          • ContainerName
          • ImageUrl

Results

If successful, this request creates a ContainerGroupDefinition resource and assigns a unique ARN value. You can update most properties of a container group definition by calling UpdateContainerGroupDefinition, and optionally save the update as a new version.
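A minimal sketch of the first request option (a game server container group), reusing the `gameLift` client from the CreateBuild sketch earlier. The image URI and names are hypothetical, and the Soto member spellings are assumptions based on Soto's code generation (for example, the list above says ImageUrl while the request shape uses an image URI member); verify against your generated `SotoGameLift` module.

```swift
// Game server container group with only the required parameters from the list above.
let definition = try await gameLift.createContainerGroupDefinition(.init(
    containerGroupType: .gameServer,
    gameServerContainerDefinition: .init(
        containerName: "game-server",
        imageUri: "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-game:latest",
        portConfiguration: .init(containerPortRanges: [
            .init(fromPort: 7777, protocol: .udp, toPort: 7777)
        ]),
        serverSdkVersion: "5.2.0"
    ),
    name: "my-game-server-group",
    operatingSystem: .amazonLinux2023,
    totalMemoryLimitMebibytes: 4096,  // omit to use the default
    totalVcpuLimit: 2                 // omit to use the default
))
print(definition.containerGroupDefinition?.containerGroupDefinitionArn ?? "no arn")
```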

" } }, "com.amazonaws.gamelift#CreateContainerGroupDefinitionInput": { @@ -2274,7 +2274,7 @@ "EC2InboundPermissions": { "target": "com.amazonaws.gamelift#IpPermissionsList", "traits": { - "smithy.api#documentation": "

The IP address ranges and port settings that allow inbound traffic to access game\n server processes and other processes on this fleet. Set this parameter for managed EC2 \n fleets. You can leave this parameter empty when creating the fleet, but you must call \n UpdateFleetPortSettings to set it before players can connect to game sessions. \n As a best practice, we recommend \n opening ports for remote access only when you need them and closing them when you're finished. \n For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.

" + "smithy.api#documentation": "

The IP address ranges and port settings that allow inbound traffic to access game\n server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call \n https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetPortSettings to set it before players can connect to game sessions. \n As a best practice, we recommend \n opening ports for remote access only when you need them and closing them when you're finished. \n For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges.
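As this note says, a fleet created without inbound permissions can't accept player traffic until you open ports. A minimal sketch with a hypothetical fleet ID and CIDR range, reusing the client from the earlier CreateBuild sketch:

```swift
// Open a small UDP range for game clients after fleet creation.
_ = try await gameLift.updateFleetPortSettings(.init(
    fleetId: "fleet-12345678-aaaa-bbbb-cccc-111122223333",  // hypothetical
    inboundPermissionAuthorizations: [
        .init(fromPort: 7777, ipRange: "203.0.113.0/24", protocol: .udp, toPort: 7780)
    ]
))
```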

" } }, "NewGameSessionProtectionPolicy": { @@ -2346,7 +2346,7 @@ "ComputeType": { "target": "com.amazonaws.gamelift#ComputeType", "traits": { - "smithy.api#documentation": "

The type of compute resource used to host your game servers.

  • EC2 – The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting.
  • ANYWHERE – Your game server and supporting software is deployed to compute resources that are provided and managed by you. With this compute type, you can also set the AnywhereConfiguration parameter.
" + "smithy.api#documentation": "
The type of compute resource used to host your game servers.

  • EC2 – The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting.
  • ANYWHERE – Game servers and supporting software are deployed to compute resources that you provide and manage (a short sketch follows). With this compute type, you can also set the AnywhereConfiguration parameter.
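To illustrate the ANYWHERE option, here is a minimal sketch of creating an Anywhere fleet against a pre-registered custom location. The location and fleet names are hypothetical; the client is the one from the earlier CreateBuild sketch.

```swift
// An Anywhere fleet: you bring the compute, so no build or EC2 settings are needed here.
let fleet = try await gameLift.createFleet(.init(
    computeType: .anywhere,
    locations: [.init(location: "custom-home-lab")],  // must already exist
    name: "my-anywhere-fleet"
))
print(fleet.fleetAttributes?.fleetId ?? "no fleet id")
```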
" } }, "AnywhereConfiguration": { @@ -2404,7 +2404,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds remote locations to a managed EC2 fleet or managed container fleet and begins populating the new locations with instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings.

You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations.

To add fleet locations, specify the fleet to be updated and provide a list of one or more locations.

If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents.

Learn more

Setting up fleets

Update fleet locations

Amazon GameLift service locations for managed hosting.
" + "smithy.api#documentation": "
Adds remote locations to an EC2 fleet and begins populating the new locations with instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings.

You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations.

To add fleet locations, specify the fleet to be updated and provide a list of one or more locations.

If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents.

Learn more

Setting up fleets

Update fleet locations

Amazon GameLift service locations for managed hosting.

" } }, "com.amazonaws.gamelift#CreateFleetLocationsInput": { @@ -2650,7 +2650,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift game session placement feature with StartGameSessionPlacement, which uses the FleetIQ algorithm and queues to optimize the placement process.

When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The target fleet must be in ACTIVE status.

You can use this operation in the following ways:

  • To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration.
  • To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration.
  • To create a game session on an instance in an Anywhere fleet, specify the fleet's custom location.

If successful, Amazon GameLift initiates a workflow to start a new game session and returns a GameSession object containing the game session configuration and status. When the game session status is ACTIVE, it is updated with connection information and you can create player sessions for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

Amazon GameLift retains logs for active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

Available in Amazon GameLift Local.

Learn more

Start a game session

All APIs by task
" + "smithy.api#documentation": "
Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift game session placement feature with StartGameSessionPlacement, which uses the FleetIQ algorithm and queues to optimize the placement process.

When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The target fleet must be in ACTIVE status.

You can use this operation in the following ways:

  • To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration.
  • To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration.
  • To create a game session on an instance in an Anywhere fleet, specify the fleet's custom location.

If successful, Amazon GameLift initiates a workflow to start a new game session and returns a GameSession object containing the game session configuration and status. When the game session status is ACTIVE, it is updated with connection information and you can create player sessions for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

Amazon GameLift retains logs for active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

Available in Amazon GameLift Local.

Learn more

Start a game session

All APIs by task
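A minimal sketch of the first usage pattern above (home Region placement by fleet ID), with hypothetical IDs, reusing the earlier client setup:

```swift
// Request a session on the fleet's home Region; poll status elsewhere until ACTIVE.
let response = try await gameLift.createGameSession(.init(
    fleetId: "fleet-12345678-aaaa-bbbb-cccc-111122223333",  // hypothetical
    gameProperties: [.init(key: "mode", value: "deathmatch")],
    maximumPlayerSessionCount: 16,
    name: "evening-match"
))
if let session = response.gameSession {
    print(session.gameSessionId ?? "?", session.status ?? .activating)
}
```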

" } }, "com.amazonaws.gamelift#CreateGameSessionInput": { @@ -3326,7 +3326,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a script resource for your Realtime Servers script. Realtime scripts are JavaScript files that provide configuration settings and optional custom game logic for your game. Script logic is executed during an active game session. To deploy Realtime Servers for hosting, create an Amazon GameLift managed fleet with the script.

To create a script resource, specify a script name and provide the script file(s). The script files and all dependencies must be combined into a single .zip file. You can upload the .zip file from either of these locations:

  • A locally available directory. Use the ZipFile parameter for this option.
  • An Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the StorageLocation parameter for this option. You'll need to have an Identity Access Management (IAM) role that allows the Amazon GameLift service to access your S3 bucket.

If the call is successful, Amazon GameLift creates a new script resource with a unique script ID. The script is uploaded to an Amazon S3 bucket that is owned by Amazon GameLift.

Learn more

Amazon GameLift Realtime Servers

Set Up a Role for Amazon GameLift Access

Related actions

All APIs by task
" + "smithy.api#documentation": "
Creates a new script record for your Realtime Servers script. Realtime scripts are JavaScript files that provide configuration settings and optional custom game logic for your game. The script is deployed when you create a Realtime Servers fleet to host your game sessions. Script logic is executed during an active game session.

To create a new script record, specify a script name and provide the script file(s). The script files and all dependencies must be zipped into a single file. You can pull the zip file from either of these locations:

  • A locally available directory. Use the ZipFile parameter for this option.
  • An Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the StorageLocation parameter for this option. You'll need to have an Identity Access Management (IAM) role that allows the Amazon GameLift service to access your S3 bucket.

If the call is successful, a new script record is created with a unique script ID. If the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3 bucket and the script record's storage location reflects this location. If the script file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as needed for deployment.

Learn more

Amazon GameLift Realtime Servers

Set Up a Role for Amazon GameLift Access

Related actions

All APIs by task
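A minimal sketch of the second option above (StorageLocation), with hypothetical bucket and role values, reusing the earlier client:

```swift
// Register a Realtime Servers script from a zip already in your S3 bucket.
let response = try await gameLift.createScript(.init(
    name: "realtime-config",
    storageLocation: .init(
        bucket: "my-script-bucket",                                 // hypothetical
        key: "scripts/realtime-1.0.0.zip",                          // hypothetical
        roleArn: "arn:aws:iam::123456789012:role/GameLiftS3Access"  // hypothetical
    )
))
print(response.script?.scriptId ?? "no script id")
```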

" } }, "com.amazonaws.gamelift#CreateScriptInput": { @@ -3713,7 +3713,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a container group definition. You can delete a container group definition if there are no fleets using the definition.

Request options:

  • Delete an entire container group definition, including all versions. Specify the container group definition name, or use an ARN value without the version number.
  • Delete a particular version. Specify the container group definition name and a version number, or use an ARN value that includes the version number.
  • Keep the newest versions and delete all older versions. Specify the container group definition name and the number of versions to retain. For example, set VersionCountToRetain to 5 to delete all but the five most recent versions.

Learn more
" + "smithy.api#documentation": "
Deletes a container group definition.

Request options:

  • Delete an entire container group definition, including all versions. Specify the container group definition name, or use an ARN value without the version number.
  • Delete a particular version. Specify the container group definition name and a version number, or use an ARN value that includes the version number.
  • Keep the newest versions and delete all older versions. Specify the container group definition name and the number of versions to retain. For example, set VersionCountToRetain to 5 to delete all but the five most recent versions (see the sketch below).

Result

If successful, Amazon GameLift removes the container group definition versions that you request deletion for. The request fails for any requested version if any of the following is true:

  • The version is being used in an active fleet.
  • The version is being deployed to a fleet in a deployment that's currently in progress.
  • The version is designated as a rollback definition in a fleet deployment that's currently in progress.

Learn more
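A minimal sketch of the third request option (retain only the newest versions), with a hypothetical definition name, reusing the earlier client:

```swift
// Keep the five most recent versions of the definition and delete the rest.
_ = try await gameLift.deleteContainerGroupDefinition(.init(
    name: "my-game-server-group",  // hypothetical
    versionCountToRetain: 5
))
```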

\n " } }, "com.amazonaws.gamelift#DeleteContainerGroupDefinitionInput": { @@ -4733,7 +4733,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves properties for a compute resource in an Amazon GameLift fleet. To get a list of all computes in a fleet, call ListCompute.

To request information on a specific compute, provide the fleet ID and compute name.

If successful, this operation returns details for the requested compute resource. Depending on the fleet's compute type, the result includes the following information:

  • For managed EC2 fleets, this operation returns information about the EC2 instance.
  • For Anywhere fleets, this operation returns information about the registered compute.
" + "smithy.api#documentation": "
Retrieves properties for a compute resource in an Amazon GameLift fleet. To get a list of all computes in a fleet, call https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute.html.

To request information on a specific compute, provide the fleet ID and compute name.

If successful, this operation returns details for the requested compute resource. Depending on the fleet's compute type, the result includes the following information:

  • For managed EC2 fleets, this operation returns information about the EC2 instance.
  • For Anywhere fleets, this operation returns information about the registered compute.
" } }, "com.amazonaws.gamelift#DescribeComputeInput": { @@ -5062,7 +5062,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the resource capacity settings for one or more fleets. For a container fleet, this operation also returns counts for game server container groups.

With multi-location fleets, this operation retrieves data for the fleet's home Region only. To retrieve capacity for remote locations, see DescribeFleetLocationCapacity.

This operation can be used in the following ways:

  • To get capacity data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs.
  • To get capacity data for all fleets, do not provide a fleet identifier.

When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a FleetCapacity object is returned for each requested fleet ID. Each FleetCapacity object includes a Location property, which is set to the fleet's home Region. Capacity values are returned only for fleets that currently exist.

Some API operations may limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Setting up Amazon GameLift fleets

GameLift metrics for fleets
", + "smithy.api#documentation": "
Retrieves the resource capacity settings for one or more fleets. For a container fleet, this operation also returns counts for game server container groups.

With multi-location fleets, this operation retrieves data for the fleet's home Region only. To retrieve capacity for remote locations, see https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html.

This operation can be used in the following ways:

  • To get capacity data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs.
  • To get capacity data for all fleets, do not provide a fleet identifier.

When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages (see the paginator sketch below).

If successful, a FleetCapacity object is returned for each requested fleet ID. Each FleetCapacity object includes a Location property, which is set to the fleet's home Region. Capacity values are returned only for fleets that currently exist.

Some API operations may limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

Learn more

Setting up Amazon GameLift fleets

GameLift metrics for fleets
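Since this operation is paginated, Soto's generated paginator can drive the sequential pages mentioned above. A minimal sketch reusing the earlier client; the `describeFleetCapacityPaginator` name follows Soto's paginator naming convention and should be verified against your Soto version.

```swift
// Page through capacity for all fleets in the home Region.
for try await page in gameLift.describeFleetCapacityPaginator(.init()) {
    for capacity in page.fleetCapacity ?? [] {
        print(capacity.fleetId ?? "?", capacity.location ?? "home")
    }
}
```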

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -5543,7 +5543,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves a fleet's inbound connection permissions. Inbound permissions specify IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game server processes that are running in the fleet must use a port that falls within this range. To connect to game server processes on a managed container fleet, the port settings should include one or more of the container fleet's connection ports.

Use this operation in the following ways:

  • To retrieve the port settings for a fleet, identify the fleet's unique identifier.
  • To check the status of recent updates to a fleet remote location, specify the fleet ID and a location. Port setting updates can take time to propagate across all locations.

If successful, a set of IpPermission objects is returned for the requested fleet ID. When specifying a location, this operation returns a pending status. If the requested fleet has been deleted, the result set is empty.

Learn more

Setting up Amazon GameLift fleets
" + "smithy.api#documentation": "
Retrieves a fleet's inbound connection permissions. Connection permissions specify IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game server processes that are running in the fleet must use a port that falls within this range.

Use this operation in the following ways:

  • To retrieve the port settings for a fleet, identify the fleet's unique identifier.
  • To check the status of recent updates to a fleet remote location, specify the fleet ID and a location. Port setting updates can take time to propagate across all locations.

If successful, a set of IpPermission objects is returned for the requested fleet ID. When specifying a location, this operation returns a pending status. If the requested fleet has been deleted, the result set is empty.

Learn more

Setting up Amazon GameLift fleets

" } }, "com.amazonaws.gamelift#DescribeFleetPortSettingsInput": { @@ -6021,7 +6021,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information, including current status, about a game session placement request.

To get game session placement details, specify the placement ID.

This operation is not designed to be continually called to track game session status. This practice can cause you to exceed your API limit, which results in errors. Instead, you must configure configure an Amazon Simple Notification Service (SNS) topic to receive notifications from FlexMatch or queues. Continuously polling with DescribeGameSessionPlacement should only be used for games in development with low game session usage.
" + "smithy.api#documentation": "
Retrieves information, including current status, about a game session placement request.

To get game session placement details, specify the placement ID.

This operation is not designed to be continually called to track game session status. This practice can cause you to exceed your API limit, which results in errors. Instead, you must configure an Amazon Simple Notification Service (SNS) topic to receive notifications from FlexMatch or queues. Continuously polling with DescribeGameSessionPlacement should only be used for games in development with low game session usage.

" } }, "com.amazonaws.gamelift#DescribeGameSessionPlacementInput": { @@ -6266,7 +6266,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information about the EC2 instances in an Amazon GameLift managed fleet, including instance ID, connection data, and status. You can use this operation with a multi-location fleet to get location-specific instance information. As an alternative, use the operations ListCompute and DescribeCompute to retrieve information for compute resources, including EC2 and Anywhere fleets.

You can call this operation in the following ways:

  • To get information on all instances in a fleet's home Region, specify the fleet ID.
  • To get information on all instances in a fleet's remote location, specify the fleet ID and location name.
  • To get information on a specific instance in a fleet, specify the fleet ID and instance ID.

Use the pagination parameters to retrieve results as a set of sequential pages.

If successful, this operation returns Instance objects for each requested instance, listed in no particular order. If you call this operation for an Anywhere fleet, you receive an InvalidRequestException.

Learn more

Remotely connect to fleet instances

Debug fleet issues

Related actions

All APIs by task
", + "smithy.api#documentation": "
Retrieves information about the EC2 instances in an Amazon GameLift managed fleet, including instance ID, connection data, and status. You can use this operation with a multi-location fleet to get location-specific instance information. As an alternative, use the operations https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute and https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeCompute to retrieve information for compute resources, including EC2 and Anywhere fleets.

You can call this operation in the following ways:

  • To get information on all instances in a fleet's home Region, specify the fleet ID.
  • To get information on all instances in a fleet's remote location, specify the fleet ID and location name.
  • To get information on a specific instance in a fleet, specify the fleet ID and instance ID.

Use the pagination parameters to retrieve results as a set of sequential pages.

If successful, this operation returns Instance objects for each requested instance, listed in no particular order. If you call this operation for an Anywhere fleet, you receive an InvalidRequestException.

Learn more

Remotely connect to fleet instances

Debug fleet issues

Related actions

All APIs by task

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -6668,7 +6668,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves a fleet's runtime configuration settings. The runtime configuration determines which server processes run, and how they run, and how many run concurrently on computes in managed EC2 and Anywhere fleets. You can update a fleet's runtime configuration at any time using UpdateRuntimeConfiguration.

To get the current runtime configuration for a fleet, provide the fleet ID.

If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty.

Learn more

Setting up Amazon GameLift fleets

Running multiple processes on a fleet
" + "smithy.api#documentation": "
Retrieves a fleet's runtime configuration settings. The runtime configuration determines which server processes run, and how, on computes in the fleet. For managed EC2 fleets, the runtime configuration describes server processes that run on each fleet instance. You can update a fleet's runtime configuration at any time using UpdateRuntimeConfiguration.

To get the current runtime configuration for a fleet, provide the fleet ID.

If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty.

Learn more

Setting up Amazon GameLift fleets

Running multiple processes on a fleet
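A minimal sketch of fetching the runtime configuration for a hypothetical fleet, reusing the earlier client:

```swift
// List the server processes the fleet launches on each compute.
let response = try await gameLift.describeRuntimeConfiguration(.init(
    fleetId: "fleet-12345678-aaaa-bbbb-cccc-111122223333"  // hypothetical
))
for process in response.runtimeConfiguration?.serverProcesses ?? [] {
    print(process)  // launch path, parameters, concurrent executions
}
```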

" } }, "com.amazonaws.gamelift#DescribeRuntimeConfigurationInput": { @@ -8156,7 +8156,7 @@ "EventCode": { "target": "com.amazonaws.gamelift#EventCode", "traits": { - "smithy.api#documentation": "

The type of event being logged.

Fleet state transition events:

  • FLEET_CREATED -- A fleet resource was successfully created with a status of NEW. Event messaging includes the fleet ID.
  • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. Amazon GameLift is downloading the compressed build and running install scripts.
  • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. Amazon GameLift has successfully installed the build and is now validating the build files.
  • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING. Amazon GameLift has successfully verified the build files and is now launching a fleet instance.
  • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. Amazon GameLift is launching a game server process on the fleet instance and is testing its connectivity with the Amazon GameLift service.
  • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.
  • FLEET_STATE_ERROR -- The fleet's status changed to ERROR. Describe the fleet event message for more details.

Fleet creation events (ordered by fleet creation activity):

  • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.
  • FLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully downloaded to an instance, and Amazon GameLift is now extracting the build files from the uploaded build. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.
  • FLEET_CREATION_RUNNING_INSTALLER -- The game server build files were successfully extracted, and Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.
  • FLEET_CREATION_COMPLETED_INSTALLER -- The game server build files were successfully installed and validation of the installation will begin soon.
  • FLEET_CREATION_FAILED_INSTALLER -- The installer failed while attempting to install the build files. This event indicates that the failure occurred before Amazon GameLift could start validation.
  • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.
  • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance.
  • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance.
  • FLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation timed out. Try fleet creation again.
  • FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. For more information, see Debug Fleet Creation Issues.
  • FLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain any instances based on the input fleet attributes. Try again at a different time or choose a different combination of fleet attributes such as fleet type, instance type, etc.
  • FLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet creation. Describe the fleet event message for more details.

VPC peering events:

  • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your Amazon Web Services account.
  • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html
  • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

Container group events:

  • CONTAINER_GROUP_REGISTRATION_FAILED -- A game server container group started, but timed out before calling RegisterCompute.
  • CONTAINER_GROUP_CRASHED -- A game server container group started and terminated without calling RegisterCompute.

Spot instance events:

  • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.
  • INSTANCE_RECYCLED -- A spot instance was determined to have a high risk of interruption and is scheduled to be recycled once it has no active game sessions.

Server process events:

  • SERVER_PROCESS_INVALID_PATH -- The game server executable or script could not be found based on the Fleet runtime configuration. Check that the launch path is correct based on the operating system of the Fleet.
  • SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call InitSDK() within the time expected (5 minutes). Check your game session log to see why InitSDK() was not called in time.
  • SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call ProcessReady() within the time expected (5 minutes) after calling InitSDK(). Check your game session log to see why ProcessReady() was not called in time.
  • SERVER_PROCESS_CRASHED -- The server process exited without calling ProcessEnding(). Check your game session log to see why ProcessEnding() was not called.
  • SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a valid health check for too long and was therefore terminated by GameLift. Check your game session log to see if the thread became stuck processing a synchronous task for too long.
  • SERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly within the time expected after OnProcessTerminate() was sent. Check your game session log to see why termination took longer than expected.
  • SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly within the time expected (30 seconds) after calling ProcessEnding(). Check your game session log to see why termination took longer than expected.

Game session events:

  • GAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the expected time. Check your game session log to see why ActivateGameSession() took longer to complete than expected.

Other fleet events:

  • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.
  • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.
  • FLEET_DELETED -- A request to delete a fleet was initiated.
  • GENERIC_EVENT -- An unspecified event has occurred.
" + "smithy.api#documentation": "

The type of event being logged.

Fleet state transition events:

  • FLEET_CREATED -- A fleet resource was successfully created with a status of NEW. Event messaging includes the fleet ID.
  • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. Amazon GameLift is downloading the compressed build and running install scripts.
  • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. Amazon GameLift has successfully installed the build and is now validating the build files.
  • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING. Amazon GameLift has successfully verified the build files and is now launching a fleet instance.
  • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. Amazon GameLift is launching a game server process on the fleet instance and is testing its connectivity with the Amazon GameLift service.
  • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.
  • FLEET_STATE_ERROR -- The fleet's status changed to ERROR. Describe the fleet event message for more details.

Fleet creation events (ordered by fleet creation activity):

  • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.
  • FLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully downloaded to an instance, and Amazon GameLift is now extracting the build files from the uploaded build. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.
  • FLEET_CREATION_RUNNING_INSTALLER -- The game server build files were successfully extracted, and Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.
  • FLEET_CREATION_COMPLETED_INSTALLER -- The game server build files were successfully installed and validation of the installation will begin soon.
  • FLEET_CREATION_FAILED_INSTALLER -- The installer failed while attempting to install the build files. This event indicates that the failure occurred before Amazon GameLift could start validation.
  • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.
  • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance.
  • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance.
  • FLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation timed out. Try fleet creation again.
  • FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. For more information, see Debug Fleet Creation Issues.
  • FLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain any instances based on the input fleet attributes. Try again at a different time or choose a different combination of fleet attributes such as fleet type, instance type, etc.
  • FLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet creation. Describe the fleet event message for more details.

VPC peering events:

  • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your Amazon Web Services account.
  • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html
  • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

Container group events:

  • CONTAINER_GROUP_REGISTRATION_FAILED -- A game server container group started, but timed out before calling RegisterCompute.
  • CONTAINER_GROUP_CRASHED -- A game server container group started and terminated without calling RegisterCompute.

Spot instance events:

  • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.
  • INSTANCE_RECYCLED -- A spot instance was determined to have a high risk of interruption and is scheduled to be recycled once it has no active game sessions.

Server process events:

  • SERVER_PROCESS_INVALID_PATH -- The game server executable or script could not be found based on the Fleet runtime configuration. Check that the launch path is correct based on the operating system of the Fleet.
  • SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call InitSDK() within the time expected (5 minutes). Check your game session log to see why InitSDK() was not called in time.
  • SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call ProcessReady() within the time expected (5 minutes) after calling InitSDK(). Check your game session log to see why ProcessReady() was not called in time.
  • SERVER_PROCESS_CRASHED -- The server process exited without calling ProcessEnding(). Check your game session log to see why ProcessEnding() was not called.
  • SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a valid health check for too long and was therefore terminated by GameLift. Check your game session log to see if the thread became stuck processing a synchronous task for too long.
  • SERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly within the time expected after OnProcessTerminate() was sent. Check your game session log to see why termination took longer than expected.
  • SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly within the time expected (30 seconds) after calling ProcessEnding(). Check your game session log to see why termination took longer than expected.

Game session events:

  • GAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the expected time. Check your game session log to see why ActivateGameSession() took longer to complete than expected.

Other fleet events:

  • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.
  • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.
  • FLEET_DELETED -- A request to delete a fleet was initiated.
  • GENERIC_EVENT -- An unspecified event has occurred.
" } }, "Message": { @@ -8555,7 +8555,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list of fleet locations where a game session queue can place new game sessions. You can use a filter to temporarily turn off placements for specific locations. For queues that have multi-location fleets, you can use a filter configuration to allow placement with some, but not all, of these locations.
" + "smithy.api#documentation": "
A list of fleet locations where a game session queue can place new game sessions. You can use a filter to temporarily exclude specific locations from receiving placements. For queues that have multi-location fleets, you can use a filter configuration to allow placement with some, but not all, of a fleet's locations.

" } }, "com.amazonaws.gamelift#FilterInstanceStatus": { @@ -8638,7 +8638,7 @@ "InstanceType": { "target": "com.amazonaws.gamelift#EC2InstanceType", "traits": { - "smithy.api#documentation": "

The Amazon EC2 instance type that the fleet uses. Instance type determines the computing\n resources of each instance in the fleet, including CPU, memory, storage, and networking\n capacity. See Amazon Elastic Compute Cloud Instance\n Types for detailed descriptions. This attribute is used with\n fleets where ComputeType is \"EC2\".

" + "smithy.api#documentation": "

The Amazon EC2 instance type that the fleet uses. Instance type determines the computing\n resources of each instance in the fleet, including CPU, memory, storage, and networking\n capacity. See Amazon Elastic Compute Cloud Instance\n Types for detailed descriptions. This attribute is used with fleets where\n ComputeType is EC2.

" } }, "Description": { @@ -8716,13 +8716,13 @@ "NewGameSessionProtectionPolicy": { "target": "com.amazonaws.gamelift#ProtectionPolicy", "traits": { - "smithy.api#documentation": "

The type of game session protection to set on all new instances that are started in the fleet. This attribute is used with fleets where ComputeType is \"EC2\".

  • NoProtection -- The game session can be terminated during a scale-down event.
  • FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.
" + "smithy.api#documentation": "

The type of game session protection to set on all new instances that are started in the fleet. This attribute is used with fleets where ComputeType is EC2.

  • NoProtection -- The game session can be terminated during a scale-down event.
  • FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.
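As a quick illustration of switching between these two values, a hedged Soto sketch follows (fleet ID hypothetical; parameter labels assumed from the generated convenience API):

```swift
import SotoGameLift

let client = AWSClient()
let gameLift = GameLift(client: client, region: .uswest2)

// Protect ACTIVE game sessions on this fleet from scale-down events.
let updated = try await gameLift.updateFleetAttributes(
    fleetId: "fleet-12345678",
    newGameSessionProtectionPolicy: .fullProtection
)
print(updated.fleetId ?? "no fleet")
try await client.shutdown()
```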
" } }, "OperatingSystem": { "target": "com.amazonaws.gamelift#OperatingSystem", "traits": { - "smithy.api#documentation": "

The operating system of the fleet's computing resources. A fleet's operating system is\n determined by the OS of the build or script that is deployed on this fleet.\n This attribute is used with\n fleets where ComputeType is \"EC2\".

\n \n

Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs.\n For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x,\n first update the game server build to server SDK 5.x, and then deploy to AL2023\n instances. See \n Migrate to Amazon GameLift server SDK version 5.\n

\n
" + "smithy.api#documentation": "

The operating system of the fleet's computing resources. A fleet's operating system is\n determined by the OS of the build or script that is deployed on this fleet. This\n attribute is used with fleets where ComputeType is\n EC2.

\n \n

Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in \n the Amazon Linux 2 FAQs. \n For game servers\n that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the\n game server build to server SDK 5.x, and then deploy to AL2023 instances. See\n \n Migrate to Amazon GameLift server SDK version 5.\n

\n
" } }, "ResourceCreationLimitPolicy": { @@ -8731,19 +8731,19 @@ "MetricGroups": { "target": "com.amazonaws.gamelift#MetricGroupList", "traits": { - "smithy.api#documentation": "

Name of a metric group that metrics for this fleet are added to. In Amazon CloudWatch,\n you can view aggregated metrics for fleets that are in a metric group. A fleet can be\n included in only one metric group at a time. This attribute is used with\n fleets where ComputeType is \"EC2\".

" + "smithy.api#documentation": "

Name of a metric group that metrics for this fleet are added to. In Amazon CloudWatch,\n you can view aggregated metrics for fleets that are in a metric group. A fleet can be\n included in only one metric group at a time. This attribute is used with fleets where\n ComputeType is EC2.

" } }, "StoppedActions": { "target": "com.amazonaws.gamelift#FleetActionList", "traits": { - "smithy.api#documentation": "

A list of fleet activity that has been suspended using StopFleetActions. \n This includes fleet auto-scaling. This attribute is used with\n fleets where ComputeType is \"EC2\".

" + "smithy.api#documentation": "

A list of fleet activity that has been suspended using StopFleetActions. This includes fleet auto-scaling. This attribute is used\n with fleets where ComputeType is EC2.

" } }, "InstanceRoleArn": { "target": "com.amazonaws.gamelift#NonEmptyString", "traits": { - "smithy.api#documentation": "

A unique identifier for an IAM role that manages access to your Amazon Web Services services. \n With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, \n including install scripts, server processes, and daemons (background processes). Create a role or look up a role's \n ARN by using the IAM dashboard in the Amazon Web Services Management Console.\n Learn more about using on-box credentials for your game servers at \n \n Access external resources from a game server. This attribute is used with\n fleets where ComputeType is \"EC2\".

" + "smithy.api#documentation": "

A unique identifier for an IAM role that manages access to your Amazon Web Services services. \n With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, \n including install scripts, server processes, and daemons (background processes). Create a role or look up a role's \n ARN by using the IAM dashboard in the Amazon Web Services Management Console.\n Learn more about using on-box credentials for your game servers at \n \n Access external resources from a game server. This attribute is used with fleets where ComputeType is\n EC2.

" } }, "CertificateConfiguration": { @@ -8761,18 +8761,18 @@ "AnywhereConfiguration": { "target": "com.amazonaws.gamelift#AnywhereConfiguration", "traits": { - "smithy.api#documentation": "

Amazon GameLift Anywhere configuration options.

" + "smithy.api#documentation": "

A set of attributes that are specific to an Anywhere fleet.

" } }, "InstanceRoleCredentialsProvider": { "target": "com.amazonaws.gamelift#InstanceRoleCredentialsProvider", "traits": { - "smithy.api#documentation": "

Indicates that fleet instances maintain a shared credentials file for the IAM role\n defined in InstanceRoleArn. Shared credentials allow applications that are\n deployed with the game server executable to communicate with other Amazon Web Services resources. This property is used \n only when the game server is integrated with the\n server SDK version 5.x. For more information about using shared credentials, see Communicate\n with other Amazon Web Services resources from your fleets.\n This attribute is used with\n fleets where ComputeType is \"EC2\".

" + "smithy.api#documentation": "

Indicates that fleet instances maintain a shared credentials file for the IAM role defined in InstanceRoleArn. Shared credentials allow\n applications that are deployed with the game server executable to communicate with other\n Amazon Web Services resources. This property is used only when the game server is integrated with the\n server SDK version 5.x. For more information about using shared credentials, see Communicate\n with other Amazon Web Services resources from your fleets. This attribute is used with\n fleets where ComputeType is EC2.

" } } }, "traits": { - "smithy.api#documentation": "

Describes an Amazon GameLift fleet of game hosting resources. Attributes differ based on the fleet's compute type, as follows:

  • EC2 fleet attributes identify a Build resource (for fleets with customer game server builds) or a Script resource (for Realtime Servers fleets).
  • Amazon GameLift Anywhere fleets have an abbreviated set of attributes, because most fleet configurations are set directly on the fleet's computes. Attributes include fleet identifiers and descriptive properties, creation/termination time, and fleet status.

Returned by: DescribeFleetAttributes

" + "smithy.api#documentation": "

Describes an Amazon GameLift fleet of game hosting resources. Attributes differ based on the fleet's compute type, as follows:

  • EC2 fleet attributes identify a Build resource (for fleets with customer game server builds) or a Script resource (for Realtime Servers fleets).
  • Amazon GameLift Anywhere fleets have an abbreviated set of attributes, because most fleet configurations are set directly on the fleet's computes. Attributes include fleet identifiers and descriptive properties, creation/termination time, and fleet status.

Returned by: https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetAttributes

" } }, "com.amazonaws.gamelift#FleetAttributesList": { @@ -8877,19 +8877,19 @@ "RollbackGameServerBinaryArn": { "target": "com.amazonaws.gamelift#FleetBinaryArn", "traits": { - "smithy.api#documentation": "

The unique identifier for the version of the game server container group definition to\n roll back to if deployment fails.

" + "smithy.api#documentation": "

The unique identifier for the version of the game server container group definition to\n roll back to if deployment fails. Amazon GameLift sets this property to the container group definition\n version that the fleet used when it was last active.

" } }, "PerInstanceBinaryArn": { "target": "com.amazonaws.gamelift#FleetBinaryArn", "traits": { - "smithy.api#documentation": "

The unique identifier for the version of the per-instance container group definition\n that is being deployed.

" + "smithy.api#documentation": "

The unique identifier for the version of the per-instance container group definition\n that is being deployed.

" } }, "RollbackPerInstanceBinaryArn": { "target": "com.amazonaws.gamelift#FleetBinaryArn", "traits": { - "smithy.api#documentation": "

The unique identifier for the version of the per-instance container group definition\n to roll back to if deployment fails.

" + "smithy.api#documentation": "

The unique identifier for the version of the per-instance container group definition\n to roll back to if deployment fails. Amazon GameLift sets this property to the container group definition\n version that the fleet used when it was last active.

" } }, "DeploymentStatus": { @@ -9431,6 +9431,9 @@ { "target": "com.amazonaws.gamelift#TagResource" }, + { + "target": "com.amazonaws.gamelift#TerminateGameSession" + }, { "target": "com.amazonaws.gamelift#UntagResource" }, @@ -10439,7 +10442,7 @@ } }, "traits": { - "smithy.api#documentation": "

This key-value pair can store custom data about a game session.\n For example, you might use a GameProperty to track a game session's map, level of difficulty, or remaining time.\n The difficulty level could be specified like this: {\"Key\": \"difficulty\", \"Value\":\"Novice\"}.\n

\n

\n You can set game properties when creating a game session. You can also modify game properties of an active game session. When searching for game sessions, you can filter on game property keys and values. You can't delete game properties from a game session.\n

\n

For examples of working with game properties, see Create a game session with properties. \n

" + "smithy.api#documentation": "

This key-value pair can store custom data about a game session. For example, you might\n use a GameProperty to track a game session's map, level of difficulty, or\n remaining time. The difficulty level could be specified like this: {\"Key\":\n \"difficulty\", \"Value\":\"Novice\"}.

\n

You can set game properties when creating a game session. You can also modify game\n properties of an active game session. When searching for game sessions, you can filter\n on game property keys and values. You can't delete game properties from a game session.

\n

For examples of working with game properties, see Create a game session with properties.
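A hedged sketch of setting a game property at creation time, matching the difficulty example above; the fleet ID and values are hypothetical, and the labels assume Soto's generated convenience initializers:

```swift
import SotoGameLift

let client = AWSClient()
let gameLift = GameLift(client: client, region: .uswest2)

// Create a session carrying a custom "difficulty" game property.
let created = try await gameLift.createGameSession(
    fleetId: "fleet-12345678",
    gameProperties: [GameLift.GameProperty(key: "difficulty", value: "Novice")],
    maximumPlayerSessionCount: 10,
    name: "novice-room"
)
print(created.gameSession?.gameSessionId ?? "pending")
try await client.shutdown()
```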

" } }, "com.amazonaws.gamelift#GamePropertyKey": { @@ -10620,7 +10623,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the game server container in an existing game server container group. A game\n server container identifies a container image with your game server build. A game server\n container is automatically considered essential; if an essential container fails, the entire\n container group restarts.

\n

You can update a container definition and deploy the updates to an existing fleet. When\n creating or updating a game server container group definition, use the property GameServerContainerDefinitionInput.

\n

\n Part of:\n ContainerGroupDefinition\n

\n

\n Returned by:\n DescribeContainerGroupDefinition, ListContainerGroupDefinitions, UpdateContainerGroupDefinition\n

" + "smithy.api#documentation": "

Describes the game server container in an existing game server container group. A game\n server container identifies a container image with your game server build. A game server\n container is automatically considered essential; if an essential container fails, the entire\n container group restarts.

\n

You can update a container definition and deploy the updates to an existing fleet. When\n creating or updating a game server container group definition, use the property \n https://docs.aws.amazon.com/gamelift/latest/apireference/API_GameServerContainerDefinitionInput.

\n

\n Part of:\n ContainerGroupDefinition\n

\n

\n Returned by:\n DescribeContainerGroupDefinition, \n ListContainerGroupDefinitions, \n UpdateContainerGroupDefinition\n

" } }, "com.amazonaws.gamelift#GameServerContainerDefinitionInput": { @@ -10678,7 +10681,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the configuration for a container that runs your game server executable. This\n definition includes container configuration, resources, and start instructions. Use this data\n type when creating or updating a game server container group definition. For properties of a\n deployed container, see GameServerContainerDefinition. A game server\n container is automatically considered essential; if an essential container fails, the entire\n container group restarts.

\n

\n Use with: \n CreateContainerGroupDefinition, UpdateContainerGroupDefinition\n

" + "smithy.api#documentation": "

Describes the configuration for a container that runs your game server executable. This\n definition includes container configuration, resources, and start instructions. Use this data\n type when creating or updating a game server container group definition. For properties of a\n deployed container, see GameServerContainerDefinition. A game server\n container is automatically considered essential; if an essential container fails, the entire\n container group restarts.

\n

\n Use with: \n CreateContainerGroupDefinition, \n UpdateContainerGroupDefinition\n

" } }, "com.amazonaws.gamelift#GameServerContainerGroupCounts": { @@ -10710,7 +10713,7 @@ } }, "traits": { - "smithy.api#documentation": "

The number and status of game server container groups that are deployed across a container fleet. \n Combine this count with the number of server processes that each game server container group runs \n to learn how many game sessions the fleet is capable of hosting concurrently. For example, if a \n fleet has 50 game server container groups, and the game server container in each group runs 1 game server \n process, then the fleet has the capacity to run host 50 game sessions at a time.

\n

\n Returned by:\n DescribeFleetCapacity, DescribeFleetLocationCapacity\n

" + "smithy.api#documentation": "

The number and status of game server container groups that are deployed across a container fleet. \n Combine this count with the number of server processes that each game server container group runs \n to learn how many game sessions the fleet is capable of hosting concurrently. For example, if a \n fleet has 50 game server container groups, and the game server container in each group runs 1 game server \n process, then the fleet has the capacity to host 50 game sessions at a time.

\n

\n Returned by:\n https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html, https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html\n

" } }, "com.amazonaws.gamelift#GameServerContainerGroupsPerInstance": { @@ -11700,7 +11703,7 @@ "StatusReason": { "target": "com.amazonaws.gamelift#GameSessionStatusReason", "traits": { - "smithy.api#documentation": "

Provides additional information about game session status. INTERRUPTED\n indicates that the game session was hosted on a spot instance that was reclaimed,\n causing the active game session to be terminated.

" + "smithy.api#documentation": "

Provides additional information about game session status.

  • INTERRUPTED -- The game session was hosted on an EC2 Spot instance that was reclaimed, causing the active game session to be stopped.
  • TRIGGERED_ON_PROCESS_TERMINATE -- The game session was stopped by calling TerminateGameSession with the termination mode TRIGGER_ON_PROCESS_TERMINATE.
  • FORCE_TERMINATED -- The game session was stopped by calling TerminateGameSession with the termination mode FORCE_TERMINATE.
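The TerminateGameSession operation referenced here is added elsewhere in this diff, but its request shape isn't visible in this hunk, so the following Soto sketch is an assumption about the generated API (hypothetical ARN; labels and enum case names inferred from Soto's conventions):

```swift
import SotoGameLift

let client = AWSClient()
let gameLift = GameLift(client: client, region: .uswest2)

// Ask the server process to wind down cleanly; a successful stop surfaces as
// the TRIGGERED_ON_PROCESS_TERMINATE status reason described above.
let result = try await gameLift.terminateGameSession(
    gameSessionId: "arn:aws:gamelift:us-west-2::gamesession/fleet-1234/gsess-5678",
    terminationMode: .triggerOnProcessTerminate
)
print(result.gameSession?.statusReason as Any)
try await client.shutdown()
```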

" } }, "GameProperties": { @@ -11730,7 +11733,7 @@ "PlayerSessionCreationPolicy": { "target": "com.amazonaws.gamelift#PlayerSessionCreationPolicy", "traits": { - "smithy.api#documentation": "

Indicates whether or not the game session is accepting new players.

" + "smithy.api#documentation": "

Indicates whether the game session is accepting new players.

" } }, "CreatorId": { @@ -11900,7 +11903,7 @@ "Status": { "target": "com.amazonaws.gamelift#GameSessionPlacementState", "traits": { - "smithy.api#documentation": "

Current status of the game session placement request.

  • PENDING -- The placement request is in the queue waiting to be processed. Game session properties are not yet final.
  • FULFILLED -- A new game session has been successfully placed. Game session properties are now final.
  • CANCELLED -- The placement request was canceled.
  • TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit as a new placement request as needed.
  • FAILED -- Amazon GameLift is not able to complete the process of placing the game session. Common reasons are the game session terminated before the placement process was completed, or an unexpected internal error.
" + "smithy.api#documentation": "

Current status of the game session placement request.

  • PENDING -- The placement request is in the queue waiting to be processed. Game session properties are not yet final.
  • FULFILLED -- A new game session has been successfully placed. Game session properties are now final.
  • CANCELLED -- The placement request was canceled.
  • TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit the placement request as needed.
  • FAILED -- Amazon GameLift is not able to complete the process of placing the game session. Common reasons are the game session terminated before the placement process was completed, or an unexpected internal error.
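A hedged polling sketch against these status values (placement ID hypothetical; labels assumed from Soto's generated API); production code would add a timeout and jittered backoff:

```swift
import SotoGameLift

let client = AWSClient()
let gameLift = GameLift(client: client, region: .uswest2)

// Poll until the placement leaves PENDING; properties are final only at FULFILLED.
var placement = try await gameLift.describeGameSessionPlacement(placementId: "plcmt-0001").gameSessionPlacement
while placement?.status == .pending {
    try await Task.sleep(nanoseconds: 2_000_000_000)  // fixed 2 s backoff for brevity
    placement = try await gameLift.describeGameSessionPlacement(placementId: "plcmt-0001").gameSessionPlacement
}
print(placement?.status as Any, placement?.gameSessionArn as Any)
try await client.shutdown()
```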
" } }, "GameProperties": { @@ -11942,7 +11945,7 @@ "PlayerLatencies": { "target": "com.amazonaws.gamelift#PlayerLatencyList", "traits": { - "smithy.api#documentation": "

A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to @aws; Regions.

" + "smithy.api#documentation": "

A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions.

" } }, "StartTime": { @@ -11992,10 +11995,16 @@ "traits": { "smithy.api#documentation": "

Information on the matchmaking process for this game. Data is in JSON syntax,\n formatted as a string. It identifies the matchmaking configuration used to create the\n match, and contains data on all players assigned to the match, including player\n attributes and team assignments. For more details on matchmaker data, see Match\n Data.

" } + }, + "PriorityConfigurationOverride": { + "target": "com.amazonaws.gamelift#PriorityConfigurationOverride", + "traits": { + "smithy.api#documentation": "

A prioritized list of locations to use with a game session placement request and\n instructions on how to use it. This list overrides a queue's prioritized location list\n for a single game session placement request only. The list can include Amazon Web Services Regions,\n local zones, and custom locations (for Anywhere fleets). The fallback strategy instructs\n Amazon GameLift to use the override list for the first placement attempt only or for all\n placement attempts.

" + } } }, "traits": { - "smithy.api#documentation": "

Represents a potential game session placement, including the full details of the\n original placement request and the current status.

\n \n

If the game session placement status is PENDING, the properties for game\n session ID/ARN, region, IP address/DNS, and port aren't final. A game session is not\n active and ready to accept players until placement status reaches\n FULFILLED. When the placement is in PENDING status,\n Amazon GameLift may attempt to place a game session multiple times before succeeding. With\n each attempt it creates a GameSession object and updates this\n placement object with the new game session properties..

\n
" + "smithy.api#documentation": "

Represents a potential game session placement, including the full details of the\n original placement request and the current status.

\n \n

If the game session placement status is PENDING, the properties for game\n session ID/ARN, region, IP address/DNS, and port aren't final. A game session is not\n active and ready to accept players until placement status reaches\n FULFILLED. When the placement is in PENDING status,\n Amazon GameLift may attempt to place a game session multiple times before succeeding. With\n each attempt it creates a https://docs.aws.amazon.com/gamelift/latest/apireference/API_GameSession object and updates this\n placement object with the new game session properties.

\n
" } }, "com.amazonaws.gamelift#GameSessionPlacementState": { @@ -12200,6 +12209,18 @@ "traits": { "smithy.api#enumValue": "INTERRUPTED" } + }, + "TRIGGERED_ON_PROCESS_TERMINATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TRIGGERED_ON_PROCESS_TERMINATE" + } + }, + "FORCE_TERMINATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FORCE_TERMINATED" + } } } }, @@ -12247,7 +12268,7 @@ "target": "com.amazonaws.gamelift#ComputeNameOrArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A unique identifier for the compute resource that you want to connect to. For an EC2\n fleet compute, use the instance ID. Use\n ListCompute to retrieve compute identifiers.

", + "smithy.api#documentation": "

A unique identifier for the compute resource that you want to connect to. For an EC2\n fleet compute, use the instance ID. Use\n https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute.html to retrieve compute identifiers.

", "smithy.api#required": {} } } @@ -12482,7 +12503,7 @@ } ], "traits": { - "smithy.api#documentation": "

Requests authorization to remotely connect to an instance in an Amazon GameLift managed fleet. Use this operation to connect to instances with game servers that use Amazon GameLift server SDK 4.x or earlier. To connect to instances with game servers that use server SDK 5.x or later, call GetComputeAccess.

To request access to an instance, specify IDs for the instance and the fleet it belongs to. You can retrieve instance IDs for a fleet by calling DescribeInstances with the fleet ID.

If successful, this operation returns an IP address and credentials. The returned credentials match the operating system of the instance, as follows:

  • For a Windows instance: returns a user name and secret (password) for use with a Windows Remote Desktop client.
  • For a Linux instance: returns a user name and secret (RSA private key) for use with an SSH client. You must save the secret to a .pem file. If you're using the CLI, see the example Get credentials for a Linux instance for tips on automatically saving the secret to a .pem file.

Learn more

Remotely connect to fleet instances

Debug fleet issues

Related actions

All APIs by task

" + "smithy.api#documentation": "

Requests authorization to remotely connect to an instance in an Amazon GameLift managed fleet. Use this operation to connect to instances with game servers that use Amazon GameLift server SDK 4.x or earlier. To connect to instances with game servers that use server SDK 5.x or later, call https://docs.aws.amazon.com/gamelift/latest/apireference/API_GetComputeAccess.

To request access to an instance, specify IDs for the instance and the fleet it belongs to. You can retrieve instance IDs for a fleet by calling DescribeInstances with the fleet ID.

If successful, this operation returns an IP address and credentials. The returned credentials match the operating system of the instance, as follows:

  • For a Windows instance: returns a user name and secret (password) for use with a Windows Remote Desktop client.
  • For a Linux instance: returns a user name and secret (RSA private key) for use with an SSH client. You must save the secret to a .pem file. If you're using the CLI, see the example Get credentials for a Linux instance for tips on automatically saving the secret to a .pem file.

Learn more

Remotely connect to fleet instances

Debug fleet issues

Related actions

All APIs by task
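A hedged sketch of the .pem advice above for the legacy (server SDK 4.x) path; IDs are hypothetical, and the permissions step is my addition rather than part of the API:

```swift
import Foundation
import SotoGameLift

let client = AWSClient()
let gameLift = GameLift(client: client, region: .uswest2)

let access = try await gameLift.getInstanceAccess(fleetId: "fleet-12345678", instanceId: "i-0abcd1234")
if let secret = access.instanceAccess?.credentials?.secret {
    // For a Linux instance the secret is an RSA private key; save it for use with ssh -i.
    let pem = URL(fileURLWithPath: "instance-key.pem")
    try secret.write(to: pem, atomically: true, encoding: .utf8)
    try FileManager.default.setAttributes([.posixPermissions: 0o600], ofItemAtPath: pem.path)
}
try await client.shutdown()
```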

" } }, "com.amazonaws.gamelift#GetInstanceAccessInput": { @@ -12668,7 +12689,7 @@ } }, "traits": { - "smithy.api#documentation": "

Information and credentials that you can use to remotely connect to an instance in an\n EC2 managed fleet. This data type is returned in response to a call to \n GetInstanceAccess.

" + "smithy.api#documentation": "

Information and credentials that you can use to remotely connect to an instance in an\n EC2 managed fleet. This data type is returned in response to a call to \n https://docs.aws.amazon.com/gamelift/latest/apireference/API_GetInstanceAccess.

" } }, "com.amazonaws.gamelift#InstanceCredentials": { @@ -12688,7 +12709,7 @@ } }, "traits": { - "smithy.api#documentation": "

A set of credentials that allow remote access to an instance in an EC2 managed fleet.\n These credentials are returned in response to a call to \n GetInstanceAccess, which requests access for instances that are running\n game servers with the Amazon GameLift server SDK version 4.x or earlier.

", + "smithy.api#documentation": "

A set of credentials that allow remote access to an instance in an EC2 managed fleet.\n These credentials are returned in response to a call to \n https://docs.aws.amazon.com/gamelift/latest/apireference/API_GetInstanceAccess, which requests access for instances that are running\n game servers with the Amazon GameLift server SDK version 4.x or earlier.

", "smithy.api#sensitive": {} } }, @@ -12881,7 +12902,7 @@ } }, "traits": { - "smithy.api#documentation": "

A range of IP addresses and port settings that allow inbound traffic to connect to\n processes on an instance in a fleet. Processes are assigned an IP address/port number\n combination, which must fall into the fleet's allowed ranges. For managed container fleets, the\n port settings must use the same port numbers as the fleet's connection ports.

\n

For Realtime Servers fleets, Amazon GameLift automatically opens two port ranges, one for TCP messaging\n and one for UDP.

" + "smithy.api#documentation": "

A range of IP addresses and port settings that allow inbound traffic to connect to\n processes on an instance in a fleet. Processes are assigned an IP address/port number\n combination, which must fall into the fleet's allowed ranges.\n

\n

For Realtime Servers fleets, Amazon GameLift automatically opens two port ranges, one for TCP messaging\n and one for UDP.

" } }, "com.amazonaws.gamelift#IpPermissionsList": { @@ -13503,7 +13524,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves container group definitions for the Amazon Web Services account and Amazon Web Services Region. Use the pagination parameters to retrieve results in a set of sequential pages.

This operation returns only the latest version of each definition. To retrieve all versions of a container group definition, use ListContainerGroupDefinitionVersions.

Request options:

  • Retrieve the most recent versions of all container group definitions.
  • Retrieve the most recent versions of all container group definitions, filtered by type. Specify the container group type to filter on.

Results:

If successful, this operation returns the complete properties of a set of container group definition versions that match the request.

This operation returns the list of container group definitions in no particular order.

Learn more

\n ", + "smithy.api#documentation": "

Retrieves container group definitions for the Amazon Web Services account and Amazon Web Services Region. Use the pagination parameters to retrieve results in a set of sequential pages.

This operation returns only the latest version of each definition. To retrieve all versions of a container group definition, use ListContainerGroupDefinitionVersions.

Request options:

  • Retrieve the most recent versions of all container group definitions.
  • Retrieve the most recent versions of all container group definitions, filtered by type. Specify the container group type to filter on.

Results:

If successful, this operation returns the complete properties of a set of container group definition versions that match the request.

This operation returns the list of container group definitions in no particular order.
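A hedged sketch of the filtered request option above (enum case and labels assumed from Soto's conventions):

```swift
import SotoGameLift

let client = AWSClient()
let gameLift = GameLift(client: client, region: .uswest2)

// Latest versions only, filtered to game server container groups.
let page = try await gameLift.listContainerGroupDefinitions(containerGroupType: .gameServer, limit: 10)
for definition in page.containerGroupDefinitions ?? [] {
    print(definition)
}
try await client.shutdown()
```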
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -13593,7 +13614,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves a collection of container fleet deployments in an Amazon Web Services Region.

Request options

  • Get a list of all deployments. Call this operation without specifying a fleet ID.
  • Get a list of all deployments for a fleet. Specify the container fleet ID or ARN value.
  • To get a list of all Realtime Servers fleets with a specific configuration script, provide the script ID.

Use the pagination parameters to retrieve results as a set of sequential pages.

Results

If successful, this operation returns a list of deployments that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve.

Fleet IDs are returned in no particular order.
", + "smithy.api#documentation": "

Retrieves a collection of container fleet deployments in an Amazon Web Services Region. Use the pagination parameters to retrieve results as a set of sequential pages.

Request options

  • Get a list of all deployments. Call this operation without specifying a fleet ID.
  • Get a list of all deployments for a fleet. Specify the container fleet ID or ARN value.

Results

If successful, this operation returns a list of deployments that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve.

Deployments are returned starting with the latest.
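A hedged sketch of the per-fleet request option (fleet ID hypothetical; output member names assumed, since the deployment shapes aren't visible in this hunk):

```swift
import SotoGameLift

let client = AWSClient()
let gameLift = GameLift(client: client, region: .uswest2)

// Newest-first deployments for one container fleet.
let page = try await gameLift.listContainerFleetDeployments(fleetId: "fleet-12345678", limit: 5)
for deployment in page.deployments ?? [] {
    print(deployment)
}
try await client.shutdown()
```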
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -14224,7 +14245,7 @@ } }, "traits": { - "smithy.api#documentation": "

Properties of a custom location for use in an Amazon GameLift Anywhere fleet. This data type is returned in response to a call to CreateLocation.

" + "smithy.api#documentation": "

Properties of a custom location for use in an Amazon GameLift Anywhere fleet. This data type is returned in response to a call to https://docs.aws.amazon.com/gamelift/latest/apireference/API_CreateLocation.

" } }, "com.amazonaws.gamelift#LocationModelList": { @@ -14233,6 +14254,18 @@ "target": "com.amazonaws.gamelift#LocationModel" } }, + "com.amazonaws.gamelift#LocationOrderOverrideList": { + "type": "list", + "member": { + "target": "com.amazonaws.gamelift#LocationStringModel" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, "com.amazonaws.gamelift#LocationState": { "type": "structure", "members": { @@ -15058,6 +15091,23 @@ "target": "com.amazonaws.gamelift#PlacedPlayerSession" } }, + "com.amazonaws.gamelift#PlacementFallbackStrategy": { + "type": "enum", + "members": { + "DEFAULT_AFTER_SINGLE_PASS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEFAULT_AFTER_SINGLE_PASS" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, "com.amazonaws.gamelift#Player": { "type": "structure", "members": { @@ -15082,7 +15132,7 @@ "LatencyInMs": { "target": "com.amazonaws.gamelift#LatencyMap", "traits": { - "smithy.api#documentation": "

A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to @aws; Regions. If this property is present, FlexMatch considers placing the match only in\n Regions for which latency is reported.

\n

If a matchmaker has a rule that evaluates player latency, players must report latency\n in order to be matched. If no latency is reported in this scenario, FlexMatch assumes that\n no Regions are available to the player and the ticket is not matchable.

" + "smithy.api#documentation": "

A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions. If this property is present, FlexMatch considers placing the match only in\n Regions for which latency is reported.

\n

If a matchmaker has a rule that evaluates player latency, players must report latency\n in order to be matched. If no latency is reported in this scenario, FlexMatch assumes that\n no Regions are available to the player and the ticket is not matchable.

" } } }, @@ -15424,18 +15474,40 @@ "PriorityOrder": { "target": "com.amazonaws.gamelift#PriorityTypeList", "traits": { - "smithy.api#documentation": "

The recommended sequence to use when prioritizing where to place new game sessions. Each type can only be listed once.

  • LATENCY -- FleetIQ prioritizes locations where the average player latency (provided in each game session request) is lowest.
  • COST -- FleetIQ prioritizes destinations with the lowest current hosting costs. Cost is evaluated based on the location, instance type, and fleet type (Spot or On-Demand) for each destination in the queue.
  • DESTINATION -- FleetIQ prioritizes based on the order that destinations are listed in the queue configuration.
  • LOCATION -- FleetIQ prioritizes based on the provided order of locations, as defined in LocationOrder.
" + "smithy.api#documentation": "

A custom sequence to use when prioritizing where to place new game sessions. Each priority type is listed once.

  • LATENCY -- Amazon GameLift prioritizes locations where the average player latency is lowest. Player latency data is provided in each game session placement request.
  • COST -- Amazon GameLift prioritizes destinations with the lowest current hosting costs. Cost is evaluated based on the location, instance type, and fleet type (Spot or On-Demand) of each destination in the queue.
  • DESTINATION -- Amazon GameLift prioritizes based on the list order of destinations in the queue configuration.
  • LOCATION -- Amazon GameLift prioritizes based on the provided order of locations, as defined in LocationOrder.
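A hedged sketch that applies this priority order to an existing queue, putting LOCATION first so it can later be overridden per placement (queue name and Regions hypothetical; labels assumed from Soto's generated API):

```swift
import SotoGameLift

let client = AWSClient()
let gameLift = GameLift(client: client, region: .uswest2)

let updated = try await gameLift.updateGameSessionQueue(
    name: "my-queue",
    priorityConfiguration: GameLift.PriorityConfiguration(
        locationOrder: ["us-west-2", "us-east-1"],
        priorityOrder: [.location, .latency, .cost, .destination]
    )
)
print(updated.gameSessionQueue?.name ?? "unknown")
try await client.shutdown()
```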
" } }, "LocationOrder": { "target": "com.amazonaws.gamelift#LocationList", "traits": { - "smithy.api#documentation": "

The prioritization order to use for fleet locations, when the\n PriorityOrder property includes LOCATION. Locations are\n identified by Amazon Web Services Region codes such as us-west-2. Each location can only\n be listed once.

" + "smithy.api#documentation": "

The prioritization order to use for fleet locations, when the\n PriorityOrder property includes LOCATION. Locations can\n include Amazon Web Services Region codes (such as us-west-2), local zones, and custom\n locations (for Anywhere fleets). Each location must be listed only once. For details, see \n Amazon GameLift service locations.\n

" } } }, "traits": { - "smithy.api#documentation": "

Custom prioritization settings for use by a game session queue when placing new game sessions with available game servers. When defined, this configuration replaces the default FleetIQ prioritization process, which is as follows:

  • If player latency data is included in a game session request, destinations and locations are prioritized first based on lowest average latency (1), then on lowest hosting cost (2), then on destination list order (3), and finally on location (alphabetical) (4). This approach ensures that the queue's top priority is to place game sessions where average player latency is lowest, and--if latency is the same--where the hosting cost is less, etc.
  • If player latency data is not included, destinations and locations are prioritized first on destination list order (1), and then on location (alphabetical) (2). This approach ensures that the queue's top priority is to place game sessions on the first destination fleet listed. If that fleet has multiple locations, the game session is placed on the first location (when listed alphabetically).

Changing the priority order will affect how game sessions are placed.

" + "smithy.api#documentation": "

Custom prioritization settings for a game session queue to use when searching for available game servers to place new game sessions. This configuration replaces the default FleetIQ prioritization process.

By default, a queue makes placements based on the following default prioritizations:

  • If player latency data is included in a game session request, Amazon GameLift prioritizes placing game sessions where the average player latency is lowest. Amazon GameLift re-orders the queue's destinations and locations (for multi-location fleets) based on the following priorities: (1) the lowest average latency across all players, (2) the lowest hosting cost, (3) the queue's default destination order, and then (4) an alphabetic list of locations.
  • If player latency data is not included, Amazon GameLift prioritizes placing game sessions in the queue's first destination. If that fleet has multiple locations, the game session is placed on the first location (when listed alphabetically). Amazon GameLift re-orders the queue's destinations and locations (for multi-location fleets) based on the following priorities: (1) the queue's default destination order, and then (2) an alphabetic list of locations.
" + } + }, + "com.amazonaws.gamelift#PriorityConfigurationOverride": { + "type": "structure", + "members": { + "PlacementFallbackStrategy": { + "target": "com.amazonaws.gamelift#PlacementFallbackStrategy", + "traits": { + "smithy.api#documentation": "

Instructions for how to use the override list if the first round of placement attempts fails. The first round is a failure if Amazon GameLift searches all listed locations, in all of the queue's destinations, without finding an available hosting resource for a new game session. Valid strategies include:

  • DEFAULT_AFTER_SINGLE_PASS -- After the first round of placement attempts, discard the override list and use the queue's default location priority list. Continue to use the queue's default list until the placement request times out.
  • NONE -- Continue to use the override list for all rounds of placement attempts until the placement request times out.
" + } + }, + "LocationOrder": { + "target": "com.amazonaws.gamelift#LocationOrderOverrideList", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

A prioritized list of hosting locations. The list can include Amazon Web Services Regions (such as\n us-west-2), local zones, and custom locations (for Anywhere fleets).\n Each location must be listed only once. For details, see \n Amazon GameLift service locations.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An alternate list of prioritized locations for use with a game session queue. When\n this property is included in a StartGameSessionPlacement request, this list overrides the queue's default\n location prioritization, as defined in the queue's PriorityConfiguration setting (LocationOrder). This\n property overrides the queue's default priority list for individual placement requests\n only. Use this property only with queues that have a PriorityConfiguration\n setting that prioritizes locations first.

\n \n

A priority configuration override list does not override a queue's\n FilterConfiguration setting, if the queue has one. Filter configurations are used to\n limit placements to a subset of the locations in a queue's destinations. If the\n override list includes a location that's not included in the FilterConfiguration\n allowed list, Amazon GameLift won't attempt to place a game session there.

\n
" } }, "com.amazonaws.gamelift#PriorityType": { @@ -16104,7 +16176,7 @@ "MaxConcurrentGameSessionActivations": { "target": "com.amazonaws.gamelift#MaxConcurrentGameSessionActivations", "traits": { - "smithy.api#documentation": "

The number of game sessions in status ACTIVATING to allow on an instance.\n This setting limits the instance resources that can be used for new game activations at\n any one time.

" + "smithy.api#documentation": "

The number of game sessions in status ACTIVATING to allow on an\n instance or compute. This setting limits the instance resources that can be\n used for new game activations at any one time.

" } }, "GameSessionActivationTimeoutSeconds": { @@ -16421,7 +16493,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves all active game sessions that match a set of search criteria and sorts them into a specified order.

This operation is not designed to continually track game session status because that practice can cause you to exceed your API limit and generate errors. Instead, configure an Amazon Simple Notification Service (Amazon SNS) topic to receive notifications from a matchmaker or a game session placement queue.

When searching for game sessions, you specify exactly where you want to search and provide a search filter expression, a sort expression, or both. A search request can search only one fleet, but it can search all of a fleet's locations.

This operation can be used in the following ways:

  • To search all game sessions that are currently running on all locations in a fleet, provide a fleet or alias ID. This approach returns game sessions in the fleet's home Region and all remote locations that fit the search criteria.
  • To search all game sessions that are currently running on a specific fleet location, provide a fleet or alias ID and a location name. For location, you can specify a fleet's home Region or any remote location.

Use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a GameSession object is returned for each game session that matches the request. Search finds game sessions that are in ACTIVE status only. To retrieve information on game sessions in other statuses, use DescribeGameSessions.

To set search and sort criteria, create a filter expression using the following game session attributes. For game session search examples, see the Examples section of this topic.

  • gameSessionId -- A unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value.
  • gameSessionName -- Name assigned to a game session. Game session names do not need to be unique to a game session.
  • gameSessionProperties -- A set of key-value pairs that can store custom data in a game session. For example: {\"Key\": \"difficulty\", \"Value\": \"novice\"}. The filter expression must specify the GameProperty -- a Key and a string Value to search for the game sessions. For example, to search for the above key-value pair, specify the following search filter: gameSessionProperties.difficulty = \"novice\". All game property values are searched as strings. For examples of searching game sessions, see the ones below, and also see Search game sessions by game property.
  • maximumSessions -- Maximum number of player sessions allowed for a game session.
  • creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds.
  • playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out.
  • hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join.

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

All APIs by task

", + "smithy.api#documentation": "

Retrieves all active game sessions that match a set of search criteria and sorts them into a specified order.

This operation is not designed to continually track game session status because that practice can cause you to exceed your API limit and generate errors. Instead, configure an Amazon Simple Notification Service (Amazon SNS) topic to receive notifications from a matchmaker or a game session placement queue.

When searching for game sessions, you specify exactly where you want to search and provide a search filter expression, a sort expression, or both. A search request can search only one fleet, but it can search all of a fleet's locations.

This operation can be used in the following ways:

  • To search all game sessions that are currently running on all locations in a fleet, provide a fleet or alias ID. This approach returns game sessions in the fleet's home Region and all remote locations that fit the search criteria.
  • To search all game sessions that are currently running on a specific fleet location, provide a fleet or alias ID and a location name. For location, you can specify a fleet's home Region or any remote location.

Use the pagination parameters to retrieve results as a set of sequential pages.

If successful, a GameSession object is returned for each game session that matches the request. Search finds game sessions that are in ACTIVE status only. To retrieve information on game sessions in other statuses, use DescribeGameSessions.

To set search and sort criteria, create a filter expression using the following game session attributes. For game session search examples, see the Examples section of this topic.

  • gameSessionId -- A unique identifier for the game session. You can use either a GameSessionId or GameSessionArn value.
  • gameSessionName -- Name assigned to a game session. Game session names do not need to be unique to a game session.
  • gameSessionProperties -- A set of key-value pairs that can store custom data in a game session. For example: {\"Key\": \"difficulty\", \"Value\": \"novice\"}. The filter expression must specify the https://docs.aws.amazon.com/gamelift/latest/apireference/API_GameProperty -- a Key and a string Value to search for the game sessions. For example, to search for the above key-value pair, specify the following search filter: gameSessionProperties.difficulty = \"novice\". All game property values are searched as strings. For examples of searching game sessions, see the ones below, and also see Search game sessions by game property.
  • maximumSessions -- Maximum number of player sessions allowed for a game session.
  • creationTimeMillis -- Value indicating when a game session was created. It is expressed in Unix time as milliseconds.
  • playerSessionCount -- Number of players currently connected to a game session. This value changes rapidly as players join the session or drop out.
  • hasAvailablePlayerSessions -- Boolean value indicating whether a game session has reached its maximum number of players. It is highly recommended that all search requests include this filter attribute to optimize search performance and return only sessions that players can join.

Returned values for playerSessionCount and hasAvailablePlayerSessions change quickly as players join sessions and others drop out. Results should be considered a snapshot in time. Be sure to refresh search results often, and handle sessions that fill up before a player can join.

All APIs by task
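A hedged sketch of the property filter above, combined with the recommended hasAvailablePlayerSessions guard (fleet ID hypothetical; labels assumed from Soto's generated API):

```swift
import SotoGameLift

let client = AWSClient()
let gameLift = GameLift(client: client, region: .uswest2)

let found = try await gameLift.searchGameSessions(
    filterExpression: "gameSessionProperties.difficulty = \"novice\" AND hasAvailablePlayerSessions = true",
    fleetId: "fleet-12345678",
    sortExpression: "playerSessionCount ASC"
)
for session in found.gameSessions ?? [] {
    print(session.gameSessionId ?? "?", session.currentPlayerSessionCount ?? 0)
}
try await client.shutdown()
```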

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -16694,10 +16766,13 @@ }, { "target": "com.amazonaws.gamelift#UnauthorizedException" + }, + { + "target": "com.amazonaws.gamelift#UnsupportedRegionException" } ], "traits": { - "smithy.api#documentation": "

Places a request for a new game session in a queue. When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out.

A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request.

When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order.

Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant Regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the Region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a Region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each Region's average lag for all players and reorders to get the best game play across all players.

To place a new game session request, specify the following:

  • The queue name and a set of game session properties and settings
  • A unique ID (such as a UUID) for the placement. You use this ID to track the status of the placement request
  • (Optional) A set of player data and a unique player ID for each player that you are joining to the new game session (player data is optional, but if you include it, you must also provide a unique ID for each player)
  • Latency data for all players (if you want to optimize game play for the players)

If successful, a new game session placement is created.

To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and Region are referenced. If the placement request times out, submit a new request to the same queue or a different queue.

" + "smithy.api#documentation": "

Makes a request to start a new game session using a game session queue. When processing a placement request in a queue, Amazon GameLift finds the best possible available resource to host the game session and prompts the resource to start the game session.

Request options

Call this API with the following minimum parameters: GameSessionQueueName, MaximumPlayerSessionCount, and PlacementID. You can also include game session data (data formatted as strings) or game properties (data formatted as key-value pairs) to pass to the new game session.

  • You can change how Amazon GameLift chooses a hosting resource for the new game session. Prioritizing resources for game session placements is defined when you configure a game session queue. You can use the default prioritization process or specify a custom process by providing a PriorityConfiguration when you create or update a queue.
      • Prioritize based on resource cost and location, using the queue's configured priority settings. Call this API with the minimum parameters.
      • Prioritize based on latency. Include a set of values for PlayerLatencies. You can provide latency data with or without player session data. This option instructs Amazon GameLift to reorder the queue's prioritized locations list based on the latency data. If latency data is provided for multiple players, Amazon GameLift calculates each location's average latency for all players and reorders to find the lowest latency across all players. Don't include latency data if you're providing a custom list of locations.
      • Prioritize based on a custom list of locations. If you're using a queue that's configured to prioritize location first (see PriorityConfiguration for game session queues), use the PriorityConfigurationOverride parameter to substitute a different location list for this placement request. When prioritizing placements by location, Amazon GameLift searches each location in prioritized order to find an available hosting resource for the new game session. You can choose whether to use the override list for the first placement attempt only or for all attempts.
  • You can request new player sessions for a group of players. Include the DesiredPlayerSessions parameter and include at minimum a unique player ID for each. You can also include player-specific data to pass to the new game session.

Result

If successful, this request generates a new game session placement request and adds it to the game session queue for Amazon GameLift to process in turn. You can track the status of individual placement requests by calling DescribeGameSessionPlacement. A new game session is running if the status is FULFILLED and the request returns the game session connection information (IP address and port). If you include player session data, Amazon GameLift creates a player session for each player ID in the request.

The request results in a BadRequestException in the following situations:

  • If the request includes both PlayerLatencies and PriorityConfigurationOverride parameters.
  • If the request includes the PriorityConfigurationOverride parameter and designates a queue that doesn't prioritize locations.

Amazon GameLift continues to retry each placement request until it reaches the queue's timeout setting. If a request times out, you can resubmit the request to the same queue or try a different queue.

" } }, "com.amazonaws.gamelift#StartGameSessionPlacementInput": { @@ -16742,7 +16817,7 @@ "PlayerLatencies": { "target": "com.amazonaws.gamelift#PlayerLatencyList", "traits": { - "smithy.api#documentation": "

A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to @aws; Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players.

" + "smithy.api#documentation": "

A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players.

" } }, "DesiredPlayerSessions": { @@ -16756,6 +16831,12 @@ "traits": { "smithy.api#documentation": "

A set of custom game session properties, formatted as a single string value. This data is passed to a game server process with a request to start a new game session. For more information, see Start a game session.

" } + }, + "PriorityConfigurationOverride": { + "target": "com.amazonaws.gamelift#PriorityConfigurationOverride", + "traits": { + "smithy.api#documentation": "

A prioritized list of locations to use for the game session placement and instructions on how to use it. This list overrides a queue's prioritized location list for this game session placement request only. You can include Amazon Web Services Regions, local zones, and custom locations (for Anywhere fleets). Choose a fallback strategy to instruct Amazon GameLift to use the override list for the first placement attempt only or for all placement attempts.

" + } } }, "traits": { @@ -17163,7 +17244,7 @@ "MemoryHardLimitMebibytes": { "target": "com.amazonaws.gamelift#ContainerMemoryLimit", "traits": { - "smithy.api#documentation": "

The amount of memory that Amazon GameLift makes available to the container. If memory limits aren't set for an individual container, the container shares the container group's total memory allocation.

Related data type: ContainerGroupDefinition$TotalMemoryLimitMebibytes

" + "smithy.api#documentation": "

The amount of memory that Amazon GameLift makes available to the container. If memory limits aren't set for an individual container, the container shares the container group's total memory allocation.

Related data type: ContainerGroupDefinition TotalMemoryLimitMebibytes

" } }, "PortConfiguration": { @@ -17181,12 +17262,12 @@ "Vcpu": { "target": "com.amazonaws.gamelift#ContainerVcpu", "traits": { - "smithy.api#documentation": "

The number of vCPU units that are reserved for the container. If no resources are reserved, the container shares the total vCPU limit for the container group.

Related data type: ContainerGroupDefinition$TotalVcpuLimit

" + "smithy.api#documentation": "

The number of vCPU units that are reserved for the container. If no resources are reserved, the container shares the total vCPU limit for the container group.

Related data type: ContainerGroupDefinition TotalVcpuLimit

" } } }, "traits": { - "smithy.api#documentation": "

Describes a support container in a container group. A support container might be in a game server container group or a per-instance container group. Support containers don't run game server processes.

You can update a support container definition and deploy the updates to an existing fleet. When creating or updating a game server container group definition, use the property GameServerContainerDefinitionInput.

Part of: ContainerGroupDefinition

Returned by: DescribeContainerGroupDefinition, ListContainerGroupDefinitions, UpdateContainerGroupDefinition

" + "smithy.api#documentation": "

Describes a support container in a container group. A support container might be in a game server container group or a per-instance container group. Support containers don't run game server processes.

You can update a support container definition and deploy the updates to an existing fleet. When creating or updating a game server container group definition, use the property GameServerContainerDefinitionInput.

Part of: ContainerGroupDefinition

Returned by: DescribeContainerGroupDefinition, ListContainerGroupDefinitions, UpdateContainerGroupDefinition

" } }, "com.amazonaws.gamelift#SupportContainerDefinitionInput": { @@ -17241,7 +17322,7 @@ "MemoryHardLimitMebibytes": { "target": "com.amazonaws.gamelift#ContainerMemoryLimit", "traits": { - "smithy.api#documentation": "

A specified amount of memory (in MiB) to reserve for this container. If you don't specify a container-specific memory limit, the container shares the container group's total memory allocation.

Related data type: ContainerGroupDefinition TotalMemoryLimitMebibytes

" + "smithy.api#documentation": "

A specified amount of memory (in MiB) to reserve for this container. If you don't specify a container-specific memory limit, the container shares the container group's total memory allocation.

Related data type: ContainerGroupDefinition TotalMemoryLimitMebibytes

" } }, "PortConfiguration": { @@ -17253,12 +17334,12 @@ "Vcpu": { "target": "com.amazonaws.gamelift#ContainerVcpu", "traits": { - "smithy.api#documentation": "

The number of vCPU units to reserve for this container. The container can use more resources when needed, if available. If you don't reserve CPU units for this container, it shares the container group's total vCPU limit.

Related data type: ContainerGroupDefinition TotalCpuLimit

" + "smithy.api#documentation": "

The number of vCPU units to reserve for this container. The container can use more resources when needed, if available. If you don't reserve CPU units for this container, it shares the container group's total vCPU limit.

Related data type: ContainerGroupDefinition TotalCpuLimit

" } } }, "traits": { - "smithy.api#documentation": "

Describes a support container in a container group. You can define a support container in either a game server container group or a per-instance container group. Support containers don't run game server processes.

This definition includes container configuration, resources, and start instructions. Use this data type when creating or updating a container group definition. For properties of a deployed support container, see SupportContainerDefinition.

Use with: CreateContainerGroupDefinition, UpdateContainerGroupDefinition

" + "smithy.api#documentation": "

Describes a support container in a container group. You can define a support container in either a game server container group or a per-instance container group. Support containers don't run game server processes.

This definition includes container configuration, resources, and start instructions. Use this data type when creating or updating a container group definition. For properties of a deployed support container, see SupportContainerDefinition.

Use with: CreateContainerGroupDefinition, UpdateContainerGroupDefinition

" } }, "com.amazonaws.gamelift#SupportContainerDefinitionInputList": { @@ -17531,6 +17612,90 @@ "smithy.api#error": "client" } }, + "com.amazonaws.gamelift#TerminateGameSession": { + "type": "operation", + "input": { + "target": "com.amazonaws.gamelift#TerminateGameSessionInput" + }, + "output": { + "target": "com.amazonaws.gamelift#TerminateGameSessionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.gamelift#InternalServiceException" + }, + { + "target": "com.amazonaws.gamelift#InvalidGameSessionStatusException" + }, + { + "target": "com.amazonaws.gamelift#InvalidRequestException" + }, + { + "target": "com.amazonaws.gamelift#NotFoundException" + }, + { + "target": "com.amazonaws.gamelift#NotReadyException" + }, + { + "target": "com.amazonaws.gamelift#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Ends a game session that's currently in progress. Use this action to terminate any game session that isn't in ERROR status. Terminating a game session is the most efficient way to free up a server process when it's hosting a game session that's in a bad state or not ending properly. You can use this action to terminate a game session that's being hosted on any type of Amazon GameLift fleet compute, including computes for managed EC2, managed container, and Anywhere fleets. The game server must be integrated with Amazon GameLift server SDK 5.x or greater.

Request options

Request termination for a single game session. Provide the game session ID and the termination mode. There are two potential methods for terminating a game session:

- Initiate a graceful termination using the normal game session shutdown sequence. With this mode, the Amazon GameLift service prompts the server process that's hosting the game session by calling the server SDK callback method OnProcessTerminate(). The callback implementation is part of the custom game server code. It might involve a variety of actions to gracefully end a game session, such as notifying players, before stopping the server process.
- Force an immediate game session termination. With this mode, the Amazon GameLift service takes action to stop the server process, which ends the game session without the normal game session shutdown sequence.

Results

If successful, game session termination is initiated. During this activity, the game session status is changed to TERMINATING. When completed, the server process that was hosting the game session has been stopped and replaced with a new server process that's ready to host a new game session. The old game session's status is changed to TERMINATED with a status reason that indicates the termination method used.

Learn more

Add Amazon GameLift to your game server

Amazon GameLift server SDK 5 reference guide for OnProcessTerminate() (C++) (C#) (Unreal) (Go)

" + } + }, + "com.amazonaws.gamelift#TerminateGameSessionInput": { + "type": "structure", + "members": { + "GameSessionId": { + "target": "com.amazonaws.gamelift#ArnStringModel", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

A unique identifier for the game session to be terminated. A game session ARN has the following format: arn:aws:gamelift:<region>::gamesession/<fleet ID>/<custom ID string or idempotency token>.

", + "smithy.api#required": {} + } + }, + "TerminationMode": { + "target": "com.amazonaws.gamelift#TerminationMode", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The method to use to terminate the game session. Available methods include:

- TRIGGER_ON_PROCESS_TERMINATE – Prompts the Amazon GameLift service to send an OnProcessTerminate() callback to the server process and initiate the normal game session shutdown sequence. The OnProcessTerminate method, which is implemented in the game server code, must include a call to the server SDK action ProcessEnding(), which is how the server process signals to Amazon GameLift that a game session is ending. If the server process doesn't call ProcessEnding(), the game session termination won't conclude successfully.
- FORCE_TERMINATE – Prompts the Amazon GameLift service to stop the server process immediately. Amazon GameLift takes action (depending on the type of fleet) to shut down the server process without the normal game session shutdown sequence. Note: This method is not available for game sessions that are running on Anywhere fleets unless the fleet is deployed with the Amazon GameLift Agent. In this scenario, a force terminate request results in an invalid or bad request exception.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.gamelift#TerminateGameSessionOutput": { + "type": "structure", + "members": { + "GameSession": { + "target": "com.amazonaws.gamelift#GameSession" + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.gamelift#TerminationMode": { + "type": "enum", + "members": { + "TRIGGER_ON_PROCESS_TERMINATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TRIGGER_ON_PROCESS_TERMINATE" + } + }, + "FORCE_TERMINATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FORCE_TERMINATE" + } + } + } + }, "com.amazonaws.gamelift#Timestamp": { "type": "timestamp" }, @@ -17792,7 +17957,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the properties of a managed container fleet. Depending on the properties being updated, this operation might initiate a fleet deployment. You can track deployments for a fleet using DescribeFleetDeployment.

Request options

As with CreateContainerFleet, many fleet properties use common defaults or are calculated based on the fleet's container group definitions.

- Update fleet properties that result in a fleet deployment. Include only those properties that you want to change. Specify deployment configuration settings.
- Update fleet properties that don't result in a fleet deployment. Include only those properties that you want to change.

Changes to the following properties initiate a fleet deployment:

- GameServerContainerGroupDefinition
- PerInstanceContainerGroupDefinition
- GameServerContainerGroupsPerInstance
- InstanceInboundPermissions
- InstanceConnectionPortRange
- LogConfiguration

Results

If successful, this operation updates the container fleet resource, and might initiate a new deployment of fleet resources using the deployment configuration provided. A deployment replaces existing fleet instances with new instances that are deployed with the updated fleet properties. The fleet is placed in UPDATING status until the deployment is complete, then returns to ACTIVE.

You can have only one update deployment active at a time for a fleet. If a second update request initiates a deployment while another deployment is in progress, the first deployment is cancelled.

" + "smithy.api#documentation": "

Updates the properties of a managed container fleet. Depending on the properties being updated, this operation might initiate a fleet deployment. You can track deployments for a fleet using https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetDeployment.html.

Request options

As with CreateContainerFleet, many fleet properties use common defaults or are calculated based on the fleet's container group definitions.

- Update fleet properties that result in a fleet deployment. Include only those properties that you want to change. Specify deployment configuration settings.
- Update fleet properties that don't result in a fleet deployment. Include only those properties that you want to change.

Changes to the following properties initiate a fleet deployment:

- GameServerContainerGroupDefinition
- PerInstanceContainerGroupDefinition
- GameServerContainerGroupsPerInstance
- InstanceInboundPermissions
- InstanceConnectionPortRange
- LogConfiguration

Results

If successful, this operation updates the container fleet resource, and might initiate a new deployment of fleet resources using the deployment configuration provided. A deployment replaces existing fleet instances with new instances that are deployed with the updated fleet properties. The fleet is placed in UPDATING status until the deployment is complete, then returns to ACTIVE.

You can have only one update deployment active at a time for a fleet. If a second update request initiates a deployment while another deployment is in progress, the first deployment is cancelled.

" } }, "com.amazonaws.gamelift#UpdateContainerFleetInput": { @@ -17809,13 +17974,13 @@ "GameServerContainerGroupDefinitionName": { "target": "com.amazonaws.gamelift#ContainerGroupDefinitionNameOrArn", "traits": { - "smithy.api#documentation": "

The name or ARN value of a new game server container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value. You can't remove a fleet's game server container group definition; you can only update or replace it with another definition.

Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version.

" + "smithy.api#documentation": "

The name or ARN value of a new game server container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value. You can't remove a fleet's game server container group definition; you can only update or replace it with another definition.

Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version.

" } }, "PerInstanceContainerGroupDefinitionName": { "target": "com.amazonaws.gamelift#ContainerGroupDefinitionNameOrArn", "traits": { - "smithy.api#documentation": "

The name or ARN value of a new per-instance container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value.

Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version.

To remove a fleet's per-instance container group definition, leave this parameter empty and use the parameter RemoveAttributes.

" + "smithy.api#documentation": "

The name or ARN value of a new per-instance container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value.

Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version.

To remove a fleet's per-instance container group definition, leave this parameter empty and use the parameter RemoveAttributes.

" } }, "GameServerContainerGroupsPerInstance": { @@ -17932,7 +18097,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates properties in an existing container group definition. This operation doesn't replace the definition. Instead, it creates a new version of the definition and saves it separately. You can access all versions that you choose to retain.

The only property you can't update is the container group type.

Request options:

- Update based on the latest version of the container group definition. Specify the container group definition name only, or use an ARN value without a version number. Provide updated values for the properties that you want to change only. All other values remain the same as the latest version.
- Update based on a specific version of the container group definition. Specify the container group definition name and a source version number, or use an ARN value with a version number. Provide updated values for the properties that you want to change only. All other values remain the same as the source version.
- Change a game server container definition. Provide the updated container definition.
- Add or change a support container definition. Provide a complete set of container definitions, including the updated definition.
- Remove a support container definition. Provide a complete set of container definitions, excluding the definition to remove. If the container group has only one support container definition, provide an empty set.

Results:

If successful, this operation returns the complete properties of the new container group definition version.

If the container group definition version is used in active fleets, the update automatically initiates a new fleet deployment of the new version. You can track a fleet's deployments using ListFleetDeployments.

" + "smithy.api#documentation": "

Updates properties in an existing container group definition. This operation doesn't replace the definition. Instead, it creates a new version of the definition and saves it separately. You can access all versions that you choose to retain.

The only property you can't update is the container group type.

Request options:

- Update based on the latest version of the container group definition. Specify the container group definition name only, or use an ARN value without a version number. Provide updated values for the properties that you want to change only. All other values remain the same as the latest version.
- Update based on a specific version of the container group definition. Specify the container group definition name and a source version number, or use an ARN value with a version number. Provide updated values for the properties that you want to change only. All other values remain the same as the source version.
- Change a game server container definition. Provide the updated container definition.
- Add or change a support container definition. Provide a complete set of container definitions, including the updated definition.
- Remove a support container definition. Provide a complete set of container definitions, excluding the definition to remove. If the container group has only one support container definition, provide an empty set.

Results:

If successful, this operation returns the complete properties of the new container group definition version.

If the container group definition version is used in active fleets, the update automatically initiates a new fleet deployment of the new version. You can track a fleet's deployments using ListFleetDeployments.

" } }, "com.amazonaws.gamelift#UpdateContainerGroupDefinitionInput": { @@ -18068,7 +18233,7 @@ "NewGameSessionProtectionPolicy": { "target": "com.amazonaws.gamelift#ProtectionPolicy", "traits": { - "smithy.api#documentation": "

The game session protection policy to apply to all new game sessions created in this fleet. Game sessions that already exist are not affected. You can set protection for individual game sessions using UpdateGameSession.

- NoProtection -- The game session can be terminated during a scale-down event.
- FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.
" + "smithy.api#documentation": "

The game session protection policy to apply to all new game sessions created in this fleet. Game sessions that already exist are not affected. You can set protection for individual game sessions using UpdateGameSession.

- NoProtection -- The game session can be terminated during a scale-down event.
- FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.
" } }, "ResourceCreationLimitPolicy": { @@ -18250,7 +18415,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates permissions that allow inbound traffic to connect to game sessions in the fleet.

To update settings, specify the fleet ID to be updated and specify the changes to be made. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions.

For a container fleet, inbound permissions must specify port numbers that are defined in the fleet's connection port settings.

If successful, the fleet ID for the updated fleet is returned. For fleets with remote locations, port setting updates can take time to propagate across all locations. You can check the status of updates in each location by calling DescribeFleetPortSettings with a location name.

Learn more

Setting up Amazon GameLift fleets

" + "smithy.api#documentation": "

Updates permissions that allow inbound traffic to connect to game sessions in the fleet.

To update settings, specify the fleet ID to be updated and specify the changes to be made. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions.

If successful, the fleet ID for the updated fleet is returned. For fleets with remote locations, port setting updates can take time to propagate across all locations. You can check the status of updates in each location by calling DescribeFleetPortSettings with a location name.

Learn more

Setting up Amazon GameLift fleets

" } }, "com.amazonaws.gamelift#UpdateFleetPortSettingsInput": { @@ -18527,7 +18692,7 @@ "ProtectionPolicy": { "target": "com.amazonaws.gamelift#ProtectionPolicy", "traits": { - "smithy.api#documentation": "

Game session protection policy to apply to this game session only.

- NoProtection -- The game session can be terminated during a scale-down event.
- FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.
" + "smithy.api#documentation": "

Game session protection policy to apply to this game session only.

- NoProtection -- The game session can be terminated during a scale-down event.
- FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.
" } }, "GameProperties": { diff --git a/models/glue.json b/models/glue.json index 37d25bc7bd..189eacd404 100644 --- a/models/glue.json +++ b/models/glue.json @@ -1838,7 +1838,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.

" + "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This overrides the timeout value set in the parent job.

Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.

When the value is left blank, the timeout is defaulted to 2880 minutes.

Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance, if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.

For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" } }, "SecurityConfiguration": { @@ -10707,7 +10707,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs.

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" + "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status.

Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.

When the value is left blank, the timeout is defaulted to 2880 minutes.

Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance, if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.

For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" } }, "MaxCapacity": { @@ -11591,7 +11591,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes), the maximum session lifetime for this job type. Consult the documentation for other job types.

" + "smithy.api#documentation": "

The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes). Consult the documentation for other job types.

" } }, "IdleTimeout": { @@ -18982,7 +18982,13 @@ "target": "com.amazonaws.glue#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

When specified as true, iterates through the account and returns all catalog resources (including top-level resources and child resources)

" + "smithy.api#documentation": "

Whether to list all catalogs across the catalog hierarchy, starting from the ParentCatalogId. Defaults to false. When true, all catalog objects in the ParentCatalogID hierarchy are enumerated in the response.

" + } + }, + "IncludeRoot": { + "target": "com.amazonaws.glue#NullableBoolean", + "traits": { + "smithy.api#documentation": "

Whether to list the default catalog in the account and region in the response. Defaults to false. When true and ParentCatalogId = NULL | Amazon Web Services Account ID, all catalogs and the default catalog are enumerated in the response.

When the ParentCatalogId is not equal to null, and this attribute is passed as false or true, an InvalidInputException is thrown.

" } } }, @@ -27058,7 +27064,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs.

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" + "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status.

Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.

When the value is left blank, the timeout is defaulted to 2880 minutes.

Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance, if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.

For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" } }, "MaxCapacity": { @@ -27414,7 +27420,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" + "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.

When the value is left blank, the timeout is defaulted to 2880 minutes.

Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance, if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.

For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" } }, "MaxCapacity": { @@ -27648,7 +27654,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs.

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" + "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status.

Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.

When the value is left blank, the timeout is defaulted to 2880 minutes.

Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance, if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.

For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" } }, "MaxCapacity": { @@ -38513,7 +38519,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" + "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.

When the value is left blank, the timeout is defaulted to 2880 minutes.

Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance, if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.

For streaming jobs, if you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" } }, "MaxCapacity": { diff --git a/models/healthlake.json b/models/healthlake.json index 33db822d02..dcb828d648 100644 --- a/models/healthlake.json +++ b/models/healthlake.json @@ -61,6 +61,12 @@ "smithy.api#enumValue": "SMART_ON_FHIR_V1" } }, + "SMART_ON_FHIR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SMART_ON_FHIR" + } + }, "AWS_AUTH": { "target": "smithy.api#Unit", "traits": { @@ -1783,6 +1789,12 @@ "smithy.api#enumValue": "SUBMITTED" } }, + "QUEUED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUEUED" + } + }, "IN_PROGRESS": { "target": "smithy.api#Unit", "traits": { @@ -2392,8 +2404,7 @@ "target": "com.amazonaws.healthlake#ClientTokenString", "traits": { "smithy.api#documentation": "

An optional user provided token used for ensuring idempotency.

", - "smithy.api#idempotencyToken": {}, - "smithy.api#required": {} + "smithy.api#idempotencyToken": {} } } }, @@ -2498,8 +2509,7 @@ "target": "com.amazonaws.healthlake#ClientTokenString", "traits": { "smithy.api#documentation": "

Optional user provided token used for ensuring idempotency.

", - "smithy.api#idempotencyToken": {}, - "smithy.api#required": {} + "smithy.api#idempotencyToken": {} } } }, diff --git a/models/imagebuilder.json b/models/imagebuilder.json index a67d8c280a..85def3512f 100644 --- a/models/imagebuilder.json +++ b/models/imagebuilder.json @@ -217,6 +217,12 @@ "traits": { "smithy.api#enumValue": "IMPORT" } + }, + "IMPORT_ISO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IMPORT_ISO" + } } } }, @@ -5500,7 +5506,7 @@ "buildType": { "target": "com.amazonaws.imagebuilder#BuildType", "traits": { - "smithy.api#documentation": "

Indicates the type of build that created this image. The build can be initiated in the following ways:

- USER_INITIATED – A manual pipeline build request.
- SCHEDULED – A pipeline build initiated by a cron expression in the Image Builder pipeline, or from EventBridge.
- IMPORT – A VM import created the image to use as the base image for the recipe.
" + "smithy.api#documentation": "

Indicates the type of build that created this image. The build can be initiated in the following ways:

- USER_INITIATED – A manual pipeline build request.
- SCHEDULED – A pipeline build initiated by a cron expression in the Image Builder pipeline, or from EventBridge.
- IMPORT – A VM import created the image to use as the base image for the recipe.
- IMPORT_ISO – An ISO disk import created the image.
" } }, "imageSource": { @@ -6402,7 +6408,7 @@ "buildType": { "target": "com.amazonaws.imagebuilder#BuildType", "traits": { - "smithy.api#documentation": "

Indicates the type of build that created this image. The build can be initiated in the following ways:

- USER_INITIATED – A manual pipeline build request.
- SCHEDULED – A pipeline build initiated by a cron expression in the Image Builder pipeline, or from EventBridge.
- IMPORT – A VM import created the image to use as the base image for the recipe.
" + "smithy.api#documentation": "

Indicates the type of build that created this image. The build can be initiated in the following ways:

- USER_INITIATED – A manual pipeline build request.
- SCHEDULED – A pipeline build initiated by a cron expression in the Image Builder pipeline, or from EventBridge.
- IMPORT – A VM import created the image to use as the base image for the recipe.
- IMPORT_ISO – An ISO disk import created the image.
" } }, "imageSource": { @@ -6446,7 +6452,7 @@ "timeoutMinutes": { "target": "com.amazonaws.imagebuilder#ImageTestsTimeoutMinutes", "traits": { - "smithy.api#documentation": "

The maximum time in minutes that tests are permitted to run.

Note: The timeout attribute is not currently active. This value is ignored.
" + "smithy.api#documentation": "

The maximum time in minutes that tests are permitted to run.

Note: The timeout property is not currently active. This value is ignored.
" } } }, @@ -6534,7 +6540,7 @@ "buildType": { "target": "com.amazonaws.imagebuilder#BuildType", "traits": { - "smithy.api#documentation": "

Indicates the type of build that created this image. The build can be initiated in the following ways:

- USER_INITIATED – A manual pipeline build request.
- SCHEDULED – A pipeline build initiated by a cron expression in the Image Builder pipeline, or from EventBridge.
- IMPORT – A VM import created the image to use as the base image for the recipe.
" + "smithy.api#documentation": "

Indicates the type of build that created this image. The build can be initiated in the following ways:

- USER_INITIATED – A manual pipeline build request.
- SCHEDULED – A pipeline build initiated by a cron expression in the Image Builder pipeline, or from EventBridge.
- IMPORT – A VM import created the image to use as the base image for the recipe.
- IMPORT_ISO – An ISO disk import created the image.
" } }, "imageSource": { @@ -6728,6 +6734,130 @@ "smithy.api#output": {} } }, + "com.amazonaws.imagebuilder#ImportDiskImage": { + "type": "operation", + "input": { + "target": "com.amazonaws.imagebuilder#ImportDiskImageRequest" + }, + "output": { + "target": "com.amazonaws.imagebuilder#ImportDiskImageResponse" + }, + "errors": [ + { + "target": "com.amazonaws.imagebuilder#ClientException" + }, + { + "target": "com.amazonaws.imagebuilder#ServiceException" + }, + { + "target": "com.amazonaws.imagebuilder#ServiceUnavailableException" + } + ], + "traits": { + "smithy.api#documentation": "

Import a Windows operating system image from a verified Microsoft ISO disk file. The following disk images are supported:

- Windows 11 Enterprise
", + "smithy.api#http": { + "method": "PUT", + "uri": "/ImportDiskImage", + "code": 200 + } + } + }, + "com.amazonaws.imagebuilder#ImportDiskImageRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.imagebuilder#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The name of the image resource that's created from the import.

", + "smithy.api#required": {} + } + }, + "semanticVersion": { + "target": "com.amazonaws.imagebuilder#VersionNumber", + "traits": { + "smithy.api#documentation": "

The semantic version to attach to the image that's created during the import process. This version follows the semantic version syntax.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.imagebuilder#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The description for your disk image import.

" + } + }, + "platform": { + "target": "com.amazonaws.imagebuilder#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The operating system platform for the imported image. Allowed values include the following: Windows.

", + "smithy.api#required": {} + } + }, + "osVersion": { + "target": "com.amazonaws.imagebuilder#OsVersion", + "traits": { + "smithy.api#documentation": "

The operating system version for the imported image. Allowed values include the following: Microsoft Windows 11.

", + "smithy.api#required": {} + } + }, + "executionRole": { + "target": "com.amazonaws.imagebuilder#RoleNameOrArn", + "traits": { + "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) for the IAM role you create that grants Image Builder access to perform workflow actions to import an image from a Microsoft ISO file.

" + } + }, + "infrastructureConfigurationArn": { + "target": "com.amazonaws.imagebuilder#InfrastructureConfigurationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the infrastructure configuration resource that's used for launching the EC2 instance on which the ISO image is built.

", + "smithy.api#required": {} + } + }, + "uri": { + "target": "com.amazonaws.imagebuilder#Uri", + "traits": { + "smithy.api#documentation": "

The URI of the ISO disk file that's stored in Amazon S3.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.imagebuilder#TagMap", + "traits": { + "smithy.api#documentation": "

Tags that are attached to image resources created from the import.

" + } + }, + "clientToken": { + "target": "com.amazonaws.imagebuilder#ClientToken", + "traits": { + "smithy.api#documentation": "

Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference.

", + "smithy.api#idempotencyToken": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.imagebuilder#ImportDiskImageResponse": { + "type": "structure", + "members": { + "clientToken": { + "target": "com.amazonaws.imagebuilder#ClientToken", + "traits": { + "smithy.api#documentation": "

The client token that uniquely identifies the request.

" + } + }, + "imageBuildVersionArn": { + "target": "com.amazonaws.imagebuilder#ImageBuildVersionArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the output AMI that was created from the ISO disk file.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.imagebuilder#ImportVmImage": { "type": "operation", "input": { @@ -11507,7 +11637,7 @@ "pipelineExecutionStartCondition": { "target": "com.amazonaws.imagebuilder#PipelineExecutionStartCondition", "traits": { - "smithy.api#documentation": "

The condition configures when the pipeline should trigger a new image build. When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, and you use semantic version filters on the base image or components in your image recipe, EC2 Image Builder will build a new image only when there are new versions of the image or components in your recipe that match the semantic version filter. When it is set to EXPRESSION_MATCH_ONLY, it will build a new image every time the CRON expression matches the current time. For semantic version syntax, see CreateComponent in the EC2 Image Builder API Reference.

" + "smithy.api#documentation": "

The start condition configures when the pipeline should trigger a new image build, as follows. If no value is set, Image Builder defaults to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE.

- EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE (default) – When you use semantic version filters on the base image or components in your image recipe, EC2 Image Builder builds a new image only when there are new versions of the base image or components in your recipe that match the filter. For semantic version syntax, see CreateComponent.
- EXPRESSION_MATCH_ONLY – This condition builds a new image every time the CRON expression matches the current time.
" } } }, @@ -13887,6 +14017,9 @@ { "target": "com.amazonaws.imagebuilder#ImportComponent" }, + { + "target": "com.amazonaws.imagebuilder#ImportDiskImage" + }, { "target": "com.amazonaws.imagebuilder#ImportVmImage" }, diff --git a/models/iot-1click-devices-service.json b/models/iot-1click-devices-service.json deleted file mode 100644 index 5f09660ca5..0000000000 --- a/models/iot-1click-devices-service.json +++ /dev/null @@ -1,1945 +0,0 @@ -{ - "smithy": "2.0", - "metadata": { - "suppressions": [ - { - "id": "HttpMethodSemantics", - "namespace": "*" - }, - { - "id": "HttpResponseCodeSemantics", - "namespace": "*" - }, - { - "id": "PaginatedTrait", - "namespace": "*" - }, - { - "id": "HttpHeaderTrait", - "namespace": "*" - }, - { - "id": "HttpUriConflict", - "namespace": "*" - }, - { - "id": "Service", - "namespace": "*" - } - ] - }, - "shapes": { - "com.amazonaws.iot1clickdevicesservice#Attributes": { - "type": "structure", - "members": {} - }, - "com.amazonaws.iot1clickdevicesservice#ClaimDevicesByClaimCode": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#ClaimDevicesByClaimCodeRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickdevicesservice#ClaimDevicesByClaimCodeResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#ForbiddenException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - } - ], - "traits": { - "smithy.api#documentation": "

Adds device(s) to your account (i.e., claim one or more devices) if and only if you received a claim code with the device(s).

", - "smithy.api#http": { - "method": "PUT", - "uri": "/claims/{ClaimCode}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#ClaimDevicesByClaimCodeRequest": { - "type": "structure", - "members": { - "ClaimCode": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The claim code, starting with \"C-\", as provided by the device manufacturer.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#ClaimDevicesByClaimCodeResponse": { - "type": "structure", - "members": { - "ClaimCode": { - "target": "com.amazonaws.iot1clickdevicesservice#__stringMin12Max40", - "traits": { - "smithy.api#documentation": "

The claim code provided by the device manufacturer.

", - "smithy.api#jsonName": "claimCode" - } - }, - "Total": { - "target": "com.amazonaws.iot1clickdevicesservice#__integer", - "traits": { - "smithy.api#documentation": "

The total number of devices associated with the claim code that has been processed in the claim request.

", - "smithy.api#jsonName": "total" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#DescribeDevice": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#DescribeDeviceRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickdevicesservice#DescribeDeviceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Given a device ID, returns a DescribeDeviceResponse object describing the details of the device.

", - "smithy.api#http": { - "method": "GET", - "uri": "/devices/{DeviceId}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#DescribeDeviceRequest": { - "type": "structure", - "members": { - "DeviceId": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The unique identifier of the device.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#DescribeDeviceResponse": { - "type": "structure", - "members": { - "DeviceDescription": { - "target": "com.amazonaws.iot1clickdevicesservice#DeviceDescription", - "traits": { - "smithy.api#documentation": "

Device details.

", - "smithy.api#jsonName": "deviceDescription" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#Device": { - "type": "structure", - "members": { - "Attributes": { - "target": "com.amazonaws.iot1clickdevicesservice#Attributes", - "traits": { - "smithy.api#documentation": "

The user specified attributes associated with the device for an event.

", - "smithy.api#jsonName": "attributes" - } - }, - "DeviceId": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The unique identifier of the device.

", - "smithy.api#jsonName": "deviceId" - } - }, - "Type": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The device type, such as \"button\".

", - "smithy.api#jsonName": "type" - } - } - } - }, - "com.amazonaws.iot1clickdevicesservice#DeviceAttributes": { - "type": "map", - "key": { - "target": "com.amazonaws.iot1clickdevicesservice#__string" - }, - "value": { - "target": "com.amazonaws.iot1clickdevicesservice#__string" - }, - "traits": { - "smithy.api#documentation": "

DeviceAttributes is a string-to-string map specified by the user.

" - } - }, - "com.amazonaws.iot1clickdevicesservice#DeviceDescription": { - "type": "structure", - "members": { - "Arn": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The ARN of the device.

", - "smithy.api#jsonName": "arn" - } - }, - "Attributes": { - "target": "com.amazonaws.iot1clickdevicesservice#DeviceAttributes", - "traits": { - "smithy.api#documentation": "

An array of zero or more elements of DeviceAttribute objects providing user specified device attributes.

", - "smithy.api#jsonName": "attributes" - } - }, - "DeviceId": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The unique identifier of the device.

", - "smithy.api#jsonName": "deviceId" - } - }, - "Enabled": { - "target": "com.amazonaws.iot1clickdevicesservice#__boolean", - "traits": { - "smithy.api#documentation": "

A Boolean value indicating whether or not the device is enabled.

", - "smithy.api#jsonName": "enabled" - } - }, - "RemainingLife": { - "target": "com.amazonaws.iot1clickdevicesservice#__doubleMin0Max100", - "traits": { - "smithy.api#documentation": "

A value between 0 and 1 inclusive, representing the fraction of life remaining for the device.

", - "smithy.api#jsonName": "remainingLife" - } - }, - "Type": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The type of the device, such as \"button\".

", - "smithy.api#jsonName": "type" - } - }, - "Tags": { - "target": "com.amazonaws.iot1clickdevicesservice#__mapOf__string", - "traits": { - "smithy.api#documentation": "

The tags currently associated with the AWS IoT 1-Click device.

", - "smithy.api#jsonName": "tags" - } - } - } - }, - "com.amazonaws.iot1clickdevicesservice#DeviceEvent": { - "type": "structure", - "members": { - "Device": { - "target": "com.amazonaws.iot1clickdevicesservice#Device", - "traits": { - "smithy.api#documentation": "

An object representing the device associated with the event.

", - "smithy.api#jsonName": "device" - } - }, - "StdEvent": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

A serialized JSON object representing the device-type-specific event.

", - "smithy.api#jsonName": "stdEvent" - } - } - } - }, - "com.amazonaws.iot1clickdevicesservice#DeviceMethod": { - "type": "structure", - "members": { - "DeviceType": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The type of the device, such as \"button\".

", - "smithy.api#jsonName": "deviceType" - } - }, - "MethodName": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The name of the method applicable to the deviceType.

", - "smithy.api#jsonName": "methodName" - } - } - } - }, - "com.amazonaws.iot1clickdevicesservice#FinalizeDeviceClaim": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#FinalizeDeviceClaimRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickdevicesservice#FinalizeDeviceClaimResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#PreconditionFailedException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceConflictException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Given a device ID, finalizes the claim request for the associated device.

Claiming a device consists of initiating a claim, then publishing a device event, and finalizing the claim. For a device of type button, a device event can be published by simply clicking the device.
", - "smithy.api#http": { - "method": "PUT", - "uri": "/devices/{DeviceId}/finalize-claim", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#FinalizeDeviceClaimRequest": { - "type": "structure", - "members": { - "DeviceId": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The unique identifier of the device.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "Tags": { - "target": "com.amazonaws.iot1clickdevicesservice#__mapOf__string", - "traits": { - "smithy.api#documentation": "

A collection of key/value pairs defining the resource tags. For example, { "tags": {"key1": "value1", "key2": "value2"} }. For more information, see AWS Tagging Strategies.

", - "smithy.api#jsonName": "tags" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#FinalizeDeviceClaimResponse": { - "type": "structure", - "members": { - "State": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The device's final claim state.

", - "smithy.api#jsonName": "state" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#ForbiddenException": { - "type": "structure", - "members": { - "Code": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

403

", - "smithy.api#jsonName": "code" - } - }, - "Message": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The 403 error message returned by the web server.

", - "smithy.api#jsonName": "message" - } - } - }, - "traits": { - "smithy.api#error": "client", - "smithy.api#httpError": 403 - } - }, - "com.amazonaws.iot1clickdevicesservice#GetDeviceMethods": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#GetDeviceMethodsRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickdevicesservice#GetDeviceMethodsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Given a device ID, returns the invokable methods associated with the device.

", - "smithy.api#http": { - "method": "GET", - "uri": "/devices/{DeviceId}/methods", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#GetDeviceMethodsRequest": { - "type": "structure", - "members": { - "DeviceId": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The unique identifier of the device.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#GetDeviceMethodsResponse": { - "type": "structure", - "members": { - "DeviceMethods": { - "target": "com.amazonaws.iot1clickdevicesservice#__listOfDeviceMethod", - "traits": { - "smithy.api#documentation": "

List of available device APIs.

", - "smithy.api#jsonName": "deviceMethods" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#InitiateDeviceClaim": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#InitiateDeviceClaimRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickdevicesservice#InitiateDeviceClaimResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceConflictException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Given a device ID, initiates a claim request for the associated device.

Claiming a device consists of initiating a claim, then publishing a device event, and finalizing the claim. For a device of type button, a device event can be published by simply clicking the device.
", - "smithy.api#http": { - "method": "PUT", - "uri": "/devices/{DeviceId}/initiate-claim", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#InitiateDeviceClaimRequest": { - "type": "structure", - "members": { - "DeviceId": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The unique identifier of the device.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#InitiateDeviceClaimResponse": { - "type": "structure", - "members": { - "State": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The device's final claim state.

", - "smithy.api#jsonName": "state" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#InternalFailureException": { - "type": "structure", - "members": { - "Code": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

500

", - "smithy.api#jsonName": "code" - } - }, - "Message": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The 500 error message returned by the web server.

", - "smithy.api#jsonName": "message" - } - } - }, - "traits": { - "smithy.api#error": "server", - "smithy.api#httpError": 500 - } - }, - "com.amazonaws.iot1clickdevicesservice#InvalidRequestException": { - "type": "structure", - "members": { - "Code": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

400

", - "smithy.api#jsonName": "code" - } - }, - "Message": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The 400 error message returned by the web server.

", - "smithy.api#jsonName": "message" - } - } - }, - "traits": { - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.iot1clickdevicesservice#InvokeDeviceMethod": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#InvokeDeviceMethodRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickdevicesservice#InvokeDeviceMethodResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#PreconditionFailedException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#RangeNotSatisfiableException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceConflictException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Given a device ID, issues a request to invoke a named device method (with possible parameters). See the "Example POST" code snippet below.

", - "smithy.api#http": { - "method": "POST", - "uri": "/devices/{DeviceId}/methods", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#InvokeDeviceMethodRequest": { - "type": "structure", - "members": { - "DeviceId": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The unique identifier of the device.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "DeviceMethod": { - "target": "com.amazonaws.iot1clickdevicesservice#DeviceMethod", - "traits": { - "smithy.api#documentation": "

The device method to invoke.

", - "smithy.api#jsonName": "deviceMethod" - } - }, - "DeviceMethodParameters": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

A JSON-encoded string containing the device method request parameters.

", - "smithy.api#jsonName": "deviceMethodParameters" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#InvokeDeviceMethodResponse": { - "type": "structure", - "members": { - "DeviceMethodResponse": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

A JSON-encoded string containing the device method response.

", - "smithy.api#jsonName": "deviceMethodResponse" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#IoT1ClickDevicesService": { - "type": "service", - "version": "2018-05-14", - "operations": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#ClaimDevicesByClaimCode" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#DescribeDevice" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#FinalizeDeviceClaim" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#GetDeviceMethods" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InitiateDeviceClaim" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvokeDeviceMethod" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ListDeviceEvents" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ListDevices" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ListTagsForResource" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#TagResource" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#UnclaimDevice" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#UntagResource" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#UpdateDeviceState" - } - ], - "traits": { - "aws.api#service": { - "sdkId": "IoT 1Click Devices Service", - "arnNamespace": "iot1click", - "cloudFormationName": "IoT1ClickDevicesService", - "cloudTrailEventSource": "iot1clickdevicesservice.amazonaws.com", - "docId": "devices-2018-05-14", - "endpointPrefix": "devices.iot1click" - }, - "aws.auth#sigv4": { - "name": "iot1click" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

Describes all of the AWS IoT 1-Click device-related API operations for the service. Also provides sample requests, responses, and errors for the supported web services protocols.

", - "smithy.api#title": "AWS IoT 1-Click Devices Service", - "smithy.rules#endpointRuleSet": { - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://devices.iot1click-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - }, - true - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://devices.iot1click-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": 
"FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://devices.iot1click.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://devices.iot1click.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] - }, - "smithy.rules#endpointTests": { - "testCases": [ - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click-fips.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled 
and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://devices.iot1click.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", - 
"expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "Missing region", - "expect": { - "error": "Invalid Configuration: Missing Region" - } - } - ], - "version": "1.0" - } - } - }, - "com.amazonaws.iot1clickdevicesservice#ListDeviceEvents": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#ListDeviceEventsRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickdevicesservice#ListDeviceEventsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#RangeNotSatisfiableException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Using a device ID, returns a ListDeviceEventsResponse object containing an array of events for the device.

", - "smithy.api#http": { - "method": "GET", - "uri": "/devices/{DeviceId}/events", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#ListDeviceEventsRequest": { - "type": "structure", - "members": { - "DeviceId": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The unique identifier of the device.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "FromTimeStamp": { - "target": "com.amazonaws.iot1clickdevicesservice#__timestampIso8601", - "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The start date for the device event query, in ISO 8601 format. For example, 2018-03-28T15:45:12.880Z.

", - "smithy.api#httpQuery": "fromTimeStamp", - "smithy.api#required": {} - } - }, - "MaxResults": { - "target": "com.amazonaws.iot1clickdevicesservice#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of results to return per request. If not set, a default value of 100 is used.

", - "smithy.api#httpQuery": "maxResults" - } - }, - "NextToken": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The token to retrieve the next set of results.

", - "smithy.api#httpQuery": "nextToken" - } - }, - "ToTimeStamp": { - "target": "com.amazonaws.iot1clickdevicesservice#__timestampIso8601", - "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The end date for the device event query, in ISO 8601 format. For example, 2018-03-28T15:45:12.880Z.

", - "smithy.api#httpQuery": "toTimeStamp", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#ListDeviceEventsResponse": { - "type": "structure", - "members": { - "Events": { - "target": "com.amazonaws.iot1clickdevicesservice#__listOfDeviceEvent", - "traits": { - "smithy.api#documentation": "

An array of zero or more elements describing the event(s) associated with the device.

", - "smithy.api#jsonName": "events" - } - }, - "NextToken": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The token to retrieve the next set of results.

", - "smithy.api#jsonName": "nextToken" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#ListDevices": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#ListDevicesRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickdevicesservice#ListDevicesResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#RangeNotSatisfiableException" - } - ], - "traits": { - "smithy.api#documentation": "

Lists the 1-Click compatible devices associated with your AWS account.

", - "smithy.api#http": { - "method": "GET", - "uri": "/devices", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#ListDevicesRequest": { - "type": "structure", - "members": { - "DeviceType": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The type of the device, such as \"button\".

", - "smithy.api#httpQuery": "deviceType" - } - }, - "MaxResults": { - "target": "com.amazonaws.iot1clickdevicesservice#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of results to return per request. If not set, a default value of 100 is used.

", - "smithy.api#httpQuery": "maxResults" - } - }, - "NextToken": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The token to retrieve the next set of results.

", - "smithy.api#httpQuery": "nextToken" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#ListDevicesResponse": { - "type": "structure", - "members": { - "Devices": { - "target": "com.amazonaws.iot1clickdevicesservice#__listOfDeviceDescription", - "traits": { - "smithy.api#documentation": "

A list of devices.

", - "smithy.api#jsonName": "devices" - } - }, - "NextToken": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The token to retrieve the next set of results.

", - "smithy.api#jsonName": "nextToken" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#ListTagsForResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#ListTagsForResourceRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickdevicesservice#ListTagsForResourceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Lists the tags associated with the specified resource ARN.

", - "smithy.api#http": { - "method": "GET", - "uri": "/tags/{ResourceArn}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#ListTagsForResourceRequest": { - "type": "structure", - "members": { - "ResourceArn": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The ARN of the resource.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#ListTagsForResourceResponse": { - "type": "structure", - "members": { - "Tags": { - "target": "com.amazonaws.iot1clickdevicesservice#__mapOf__string", - "traits": { - "smithy.api#documentation": "

A collection of key/value pairs defining the resource tags. For example, { "tags": {"key1": "value1", "key2": "value2"} }. For more information, see AWS Tagging Strategies.

", - "smithy.api#jsonName": "tags" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#MaxResults": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1, - "max": 250 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#PreconditionFailedException": { - "type": "structure", - "members": { - "Code": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

412

", - "smithy.api#jsonName": "code" - } - }, - "Message": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

An error message explaining the error or its remedy.

", - "smithy.api#jsonName": "message" - } - } - }, - "traits": { - "smithy.api#error": "client", - "smithy.api#httpError": 412 - } - }, - "com.amazonaws.iot1clickdevicesservice#RangeNotSatisfiableException": { - "type": "structure", - "members": { - "Code": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

416

", - "smithy.api#jsonName": "code" - } - }, - "Message": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The requested number of results specified by nextToken cannot be satisfied.

", - "smithy.api#jsonName": "message" - } - } - }, - "traits": { - "smithy.api#error": "client", - "smithy.api#httpError": 416 - } - }, - "com.amazonaws.iot1clickdevicesservice#ResourceConflictException": { - "type": "structure", - "members": { - "Code": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

409

", - "smithy.api#jsonName": "code" - } - }, - "Message": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

An error message explaining the error or its remedy.

", - "smithy.api#jsonName": "message" - } - } - }, - "traits": { - "smithy.api#error": "client", - "smithy.api#httpError": 409 - } - }, - "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException": { - "type": "structure", - "members": { - "Code": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

404

", - "smithy.api#jsonName": "code" - } - }, - "Message": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The requested device could not be found.

", - "smithy.api#jsonName": "message" - } - } - }, - "traits": { - "smithy.api#error": "client", - "smithy.api#httpError": 404 - } - }, - "com.amazonaws.iot1clickdevicesservice#TagResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#TagResourceRequest" - }, - "output": { - "target": "smithy.api#Unit" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Adds or updates the tags associated with the resource ARN. See AWS IoT 1-Click Service Limits for the maximum number of tags allowed per resource.

", - "smithy.api#http": { - "method": "POST", - "uri": "/tags/{ResourceArn}", - "code": 204 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#TagResourceRequest": { - "type": "structure", - "members": { - "ResourceArn": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The ARN of the resource.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "Tags": { - "target": "com.amazonaws.iot1clickdevicesservice#__mapOf__string", - "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A collection of key/value pairs defining the resource tags. For example, { "tags": {"key1": "value1", "key2": "value2"} }. For more information, see AWS Tagging Strategies.

", - "smithy.api#jsonName": "tags", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#UnclaimDevice": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#UnclaimDeviceRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickdevicesservice#UnclaimDeviceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Disassociates a device from your AWS account using its device ID.

", - "smithy.api#http": { - "method": "PUT", - "uri": "/devices/{DeviceId}/unclaim", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#UnclaimDeviceRequest": { - "type": "structure", - "members": { - "DeviceId": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The unique identifier of the device.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#UnclaimDeviceResponse": { - "type": "structure", - "members": { - "State": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The device's final claim state.

", - "smithy.api#jsonName": "state" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#UntagResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#UntagResourceRequest" - }, - "output": { - "target": "smithy.api#Unit" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Using tag keys, deletes the tags (key/value pairs) associated with the specified resource ARN.

", - "smithy.api#http": { - "method": "DELETE", - "uri": "/tags/{ResourceArn}", - "code": 204 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#UntagResourceRequest": { - "type": "structure", - "members": { - "ResourceArn": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The ARN of the resource.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "TagKeys": { - "target": "com.amazonaws.iot1clickdevicesservice#__listOf__string", - "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A collection of tag keys. For example, {"key1","key2"}

", - "smithy.api#httpQuery": "tagKeys", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#UpdateDeviceState": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickdevicesservice#UpdateDeviceStateRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickdevicesservice#UpdateDeviceStateResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickdevicesservice#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickdevicesservice#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "

Using a Boolean value (true or false), this operation enables or disables the device given a device ID.

", - "smithy.api#http": { - "method": "PUT", - "uri": "/devices/{DeviceId}/state", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#UpdateDeviceStateRequest": { - "type": "structure", - "members": { - "DeviceId": { - "target": "com.amazonaws.iot1clickdevicesservice#__string", - "traits": { - "smithy.api#documentation": "

The unique identifier of the device.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "Enabled": { - "target": "com.amazonaws.iot1clickdevicesservice#__boolean", - "traits": { - "smithy.api#documentation": "

If true, the device is enabled. If false, the device is disabled.

", - "smithy.api#jsonName": "enabled" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#UpdateDeviceStateResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickdevicesservice#__boolean": { - "type": "boolean" - }, - "com.amazonaws.iot1clickdevicesservice#__doubleMin0Max100": { - "type": "double" - }, - "com.amazonaws.iot1clickdevicesservice#__integer": { - "type": "integer" - }, - "com.amazonaws.iot1clickdevicesservice#__listOfDeviceDescription": { - "type": "list", - "member": { - "target": "com.amazonaws.iot1clickdevicesservice#DeviceDescription" - } - }, - "com.amazonaws.iot1clickdevicesservice#__listOfDeviceEvent": { - "type": "list", - "member": { - "target": "com.amazonaws.iot1clickdevicesservice#DeviceEvent" - } - }, - "com.amazonaws.iot1clickdevicesservice#__listOfDeviceMethod": { - "type": "list", - "member": { - "target": "com.amazonaws.iot1clickdevicesservice#DeviceMethod" - } - }, - "com.amazonaws.iot1clickdevicesservice#__listOf__string": { - "type": "list", - "member": { - "target": "com.amazonaws.iot1clickdevicesservice#__string" - } - }, - "com.amazonaws.iot1clickdevicesservice#__mapOf__string": { - "type": "map", - "key": { - "target": "com.amazonaws.iot1clickdevicesservice#__string" - }, - "value": { - "target": "com.amazonaws.iot1clickdevicesservice#__string" - } - }, - "com.amazonaws.iot1clickdevicesservice#__string": { - "type": "string" - }, - "com.amazonaws.iot1clickdevicesservice#__stringMin12Max40": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 12, - "max": 40 - } - } - }, - "com.amazonaws.iot1clickdevicesservice#__timestampIso8601": { - "type": "timestamp", - "traits": { - "smithy.api#timestampFormat": "date-time" - } - } - } -} diff --git a/models/iot-1click-projects.json b/models/iot-1click-projects.json deleted file mode 100644 index 941ea127e3..0000000000 --- a/models/iot-1click-projects.json +++ /dev/null @@ -1,2429 +0,0 @@ -{ - "smithy": "2.0", - "metadata": { - "suppressions": [ - { - "id": "HttpMethodSemantics", - "namespace": "*" - }, - { - "id": "HttpResponseCodeSemantics", - "namespace": "*" - }, - { - "id": "PaginatedTrait", - "namespace": "*" - }, - { - "id": "HttpHeaderTrait", - "namespace": "*" - }, - { - "id": "HttpUriConflict", - "namespace": "*" - }, - { - "id": "Service", - "namespace": "*" - } - ] - }, - "shapes": { - "com.amazonaws.iot1clickprojects#AWSIoT1ClickProjects": { - "type": "service", - "version": "2018-05-14", - "operations": [ - { - "target": "com.amazonaws.iot1clickprojects#AssociateDeviceWithPlacement" - }, - { - "target": "com.amazonaws.iot1clickprojects#CreatePlacement" - }, - { - "target": "com.amazonaws.iot1clickprojects#CreateProject" - }, - { - "target": "com.amazonaws.iot1clickprojects#DeletePlacement" - }, - { - "target": "com.amazonaws.iot1clickprojects#DeleteProject" - }, - { - "target": "com.amazonaws.iot1clickprojects#DescribePlacement" - }, - { - "target": "com.amazonaws.iot1clickprojects#DescribeProject" - }, - { - "target": "com.amazonaws.iot1clickprojects#DisassociateDeviceFromPlacement" - }, - { - "target": "com.amazonaws.iot1clickprojects#GetDevicesInPlacement" - }, - { - "target": "com.amazonaws.iot1clickprojects#ListPlacements" - }, - { - "target": "com.amazonaws.iot1clickprojects#ListProjects" - }, - { - "target": "com.amazonaws.iot1clickprojects#ListTagsForResource" - }, - { - "target": "com.amazonaws.iot1clickprojects#TagResource" - }, - { - "target": 
"com.amazonaws.iot1clickprojects#UntagResource" - }, - { - "target": "com.amazonaws.iot1clickprojects#UpdatePlacement" - }, - { - "target": "com.amazonaws.iot1clickprojects#UpdateProject" - } - ], - "traits": { - "aws.api#service": { - "sdkId": "IoT 1Click Projects", - "arnNamespace": "iot1click", - "cloudFormationName": "IoT1Click", - "cloudTrailEventSource": "iot1clickprojects.amazonaws.com", - "docId": "iot1click-projects-2018-05-14", - "endpointPrefix": "projects.iot1click" - }, - "aws.auth#sigv4": { - "name": "iot1click" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

The AWS IoT 1-Click Projects API Reference

", - "smithy.api#title": "AWS IoT 1-Click Projects Service", - "smithy.rules#endpointRuleSet": { - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://projects.iot1click-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://projects.iot1click-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": 
"endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://projects.iot1click.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://projects.iot1click.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] - }, - "smithy.rules#endpointTests": { - "testCases": [ - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.eu-central-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.eu-west-2.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.us-east-2.amazonaws.com" - } - }, - "params": { - "Region": "us-east-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.us-west-2.amazonaws.com" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click-fips.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and 
DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack 
is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://projects.iot1click.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "Missing region", - "expect": { - "error": "Invalid Configuration: Missing Region" - } - } - ], - "version": "1.0" - } - } - }, - "com.amazonaws.iot1clickprojects#AssociateDeviceWithPlacement": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#AssociateDeviceWithPlacementRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#AssociateDeviceWithPlacementResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceConflictException" - }, - { - 
"target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "
Associates a physical device with a placement.
", - "smithy.api#http": { - "method": "PUT", - "uri": "/projects/{projectName}/placements/{placementName}/devices/{deviceTemplateName}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#AssociateDeviceWithPlacementRequest": { - "type": "structure", - "members": { - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project containing the placement in which to associate the device.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "placementName": { - "target": "com.amazonaws.iot1clickprojects#PlacementName", - "traits": { - "smithy.api#documentation": "
The name of the placement in which to associate the device.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "deviceId": { - "target": "com.amazonaws.iot1clickprojects#DeviceId", - "traits": { - "smithy.api#documentation": "
The ID of the physical device to be associated with the given placement in the project.\n Note that a mandatory 4 character prefix is required for all deviceId\n values.
", - "smithy.api#required": {} - } - }, - "deviceTemplateName": { - "target": "com.amazonaws.iot1clickprojects#DeviceTemplateName", - "traits": { - "smithy.api#documentation": "
The device template name to associate with the device ID.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#AssociateDeviceWithPlacementResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#AttributeDefaultValue": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 800 - } - } - }, - "com.amazonaws.iot1clickprojects#AttributeName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 128 - } - } - }, - "com.amazonaws.iot1clickprojects#AttributeValue": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 800 - } - } - }, - "com.amazonaws.iot1clickprojects#Code": { - "type": "string" - }, - "com.amazonaws.iot1clickprojects#CreatePlacement": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#CreatePlacementRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#CreatePlacementResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceConflictException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "
Creates an empty placement.
", - "smithy.api#http": { - "method": "POST", - "uri": "/projects/{projectName}/placements", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#CreatePlacementRequest": { - "type": "structure", - "members": { - "placementName": { - "target": "com.amazonaws.iot1clickprojects#PlacementName", - "traits": { - "smithy.api#documentation": "
The name of the placement to be created.
", - "smithy.api#required": {} - } - }, - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project in which to create the placement.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "attributes": { - "target": "com.amazonaws.iot1clickprojects#PlacementAttributeMap", - "traits": { - "smithy.api#documentation": "
Optional user-defined key/value pairs providing contextual data (such as location or\n function) for the placement.
" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#CreatePlacementResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#CreateProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#CreateProjectRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#CreateProjectResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceConflictException" - } - ], - "traits": { - "smithy.api#documentation": "
Creates an empty project with a placement template. A project contains zero or more\n placements that adhere to the placement template defined in the project.
", - "smithy.api#http": { - "method": "POST", - "uri": "/projects", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#CreateProjectRequest": { - "type": "structure", - "members": { - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project to create.
", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.iot1clickprojects#Description", - "traits": { - "smithy.api#documentation": "
An optional description for the project.
" - } - }, - "placementTemplate": { - "target": "com.amazonaws.iot1clickprojects#PlacementTemplate", - "traits": { - "smithy.api#documentation": "
The schema defining the placement to be created. A placement template defines placement\n default attributes and device templates. You cannot add or remove device templates after the\n project has been created. However, you can update callbackOverrides for the\n device templates using the UpdateProject API.
" - } - }, - "tags": { - "target": "com.amazonaws.iot1clickprojects#TagMap", - "traits": { - "smithy.api#documentation": "
Optional tags (metadata key/value pairs) to be associated with the project. For example,\n { {\"key1\": \"value1\", \"key2\": \"value2\"} }. For more information, see AWS Tagging\n Strategies.
" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#CreateProjectResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#DefaultPlacementAttributeMap": { - "type": "map", - "key": { - "target": "com.amazonaws.iot1clickprojects#AttributeName" - }, - "value": { - "target": "com.amazonaws.iot1clickprojects#AttributeDefaultValue" - } - }, - "com.amazonaws.iot1clickprojects#DeletePlacement": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#DeletePlacementRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#DeletePlacementResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.iot1clickprojects#TooManyRequestsException" - } - ], - "traits": { - "smithy.api#documentation": "
Deletes a placement. To delete a placement, it must not have any devices associated with\n it. Note: When you delete a placement, all associated data becomes irretrievable.
", - "smithy.api#http": { - "method": "DELETE", - "uri": "/projects/{projectName}/placements/{placementName}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#DeletePlacementRequest": { - "type": "structure", - "members": { - "placementName": { - "target": "com.amazonaws.iot1clickprojects#PlacementName", - "traits": { - "smithy.api#documentation": "
The name of the empty placement to delete.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The project containing the empty placement to delete.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#DeletePlacementResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#DeleteProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#DeleteProjectRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#DeleteProjectResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.iot1clickprojects#TooManyRequestsException" - } - ], - "traits": { - "smithy.api#documentation": "
Deletes a project. To delete a project, it must not have any placements associated with\n it. Note: When you delete a project, all associated data becomes irretrievable.
", - "smithy.api#http": { - "method": "DELETE", - "uri": "/projects/{projectName}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#DeleteProjectRequest": { - "type": "structure", - "members": { - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the empty project to delete.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#DeleteProjectResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#DescribePlacement": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#DescribePlacementRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#DescribePlacementResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "
Describes a placement in a project.
", - "smithy.api#http": { - "method": "GET", - "uri": "/projects/{projectName}/placements/{placementName}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#DescribePlacementRequest": { - "type": "structure", - "members": { - "placementName": { - "target": "com.amazonaws.iot1clickprojects#PlacementName", - "traits": { - "smithy.api#documentation": "
The name of the placement within a project.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The project containing the placement to be described.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#DescribePlacementResponse": { - "type": "structure", - "members": { - "placement": { - "target": "com.amazonaws.iot1clickprojects#PlacementDescription", - "traits": { - "smithy.api#documentation": "
An object describing the placement.
", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#DescribeProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#DescribeProjectRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#DescribeProjectResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "
Returns an object describing a project.
", - "smithy.api#http": { - "method": "GET", - "uri": "/projects/{projectName}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#DescribeProjectRequest": { - "type": "structure", - "members": { - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project to be described.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#DescribeProjectResponse": { - "type": "structure", - "members": { - "project": { - "target": "com.amazonaws.iot1clickprojects#ProjectDescription", - "traits": { - "smithy.api#documentation": "
An object describing the project.
", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#Description": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 500 - } - } - }, - "com.amazonaws.iot1clickprojects#DeviceCallbackKey": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 128 - } - } - }, - "com.amazonaws.iot1clickprojects#DeviceCallbackOverrideMap": { - "type": "map", - "key": { - "target": "com.amazonaws.iot1clickprojects#DeviceCallbackKey" - }, - "value": { - "target": "com.amazonaws.iot1clickprojects#DeviceCallbackValue" - } - }, - "com.amazonaws.iot1clickprojects#DeviceCallbackValue": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#DeviceId": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 32 - } - } - }, - "com.amazonaws.iot1clickprojects#DeviceMap": { - "type": "map", - "key": { - "target": "com.amazonaws.iot1clickprojects#DeviceTemplateName" - }, - "value": { - "target": "com.amazonaws.iot1clickprojects#DeviceId" - } - }, - "com.amazonaws.iot1clickprojects#DeviceTemplate": { - "type": "structure", - "members": { - "deviceType": { - "target": "com.amazonaws.iot1clickprojects#DeviceType", - "traits": { - "smithy.api#documentation": "
The device type, which currently must be \"button\".
" - } - }, - "callbackOverrides": { - "target": "com.amazonaws.iot1clickprojects#DeviceCallbackOverrideMap", - "traits": { - "smithy.api#documentation": "
An optional Lambda function to invoke instead of the default Lambda function provided by\n the placement template.
" - } - } - }, - "traits": { - "smithy.api#documentation": "
An object representing a device for a placement template (see PlacementTemplate).
" - } - }, - "com.amazonaws.iot1clickprojects#DeviceTemplateMap": { - "type": "map", - "key": { - "target": "com.amazonaws.iot1clickprojects#DeviceTemplateName" - }, - "value": { - "target": "com.amazonaws.iot1clickprojects#DeviceTemplate" - } - }, - "com.amazonaws.iot1clickprojects#DeviceTemplateName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 128 - }, - "smithy.api#pattern": "^[a-zA-Z0-9_-]+$" - } - }, - "com.amazonaws.iot1clickprojects#DeviceType": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 128 - } - } - }, - "com.amazonaws.iot1clickprojects#DisassociateDeviceFromPlacement": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#DisassociateDeviceFromPlacementRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#DisassociateDeviceFromPlacementResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.iot1clickprojects#TooManyRequestsException" - } - ], - "traits": { - "smithy.api#documentation": "
Removes a physical device from a placement.
", - "smithy.api#http": { - "method": "DELETE", - "uri": "/projects/{projectName}/placements/{placementName}/devices/{deviceTemplateName}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#DisassociateDeviceFromPlacementRequest": { - "type": "structure", - "members": { - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project that contains the placement.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "placementName": { - "target": "com.amazonaws.iot1clickprojects#PlacementName", - "traits": { - "smithy.api#documentation": "
The name of the placement that the device should be removed from.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "deviceTemplateName": { - "target": "com.amazonaws.iot1clickprojects#DeviceTemplateName", - "traits": { - "smithy.api#documentation": "
The device ID that should be removed from the placement.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#DisassociateDeviceFromPlacementResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#GetDevicesInPlacement": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#GetDevicesInPlacementRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#GetDevicesInPlacementResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "
Returns an object enumerating the devices in a placement.
", - "smithy.api#http": { - "method": "GET", - "uri": "/projects/{projectName}/placements/{placementName}/devices", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#GetDevicesInPlacementRequest": { - "type": "structure", - "members": { - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project containing the placement.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "placementName": { - "target": "com.amazonaws.iot1clickprojects#PlacementName", - "traits": { - "smithy.api#documentation": "
The name of the placement to get the devices from.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#GetDevicesInPlacementResponse": { - "type": "structure", - "members": { - "devices": { - "target": "com.amazonaws.iot1clickprojects#DeviceMap", - "traits": { - "smithy.api#documentation": "
An object containing the devices (zero or more) within the placement.
", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#InternalFailureException": { - "type": "structure", - "members": { - "code": { - "target": "com.amazonaws.iot1clickprojects#Code", - "traits": { - "smithy.api#required": {} - } - }, - "message": { - "target": "com.amazonaws.iot1clickprojects#Message", - "traits": { - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "
", - "smithy.api#error": "server", - "smithy.api#httpError": 500 - } - }, - "com.amazonaws.iot1clickprojects#InvalidRequestException": { - "type": "structure", - "members": { - "code": { - "target": "com.amazonaws.iot1clickprojects#Code", - "traits": { - "smithy.api#required": {} - } - }, - "message": { - "target": "com.amazonaws.iot1clickprojects#Message", - "traits": { - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "
", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.iot1clickprojects#ListPlacements": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#ListPlacementsRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#ListPlacementsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "
Lists the placement(s) of a project.
", - "smithy.api#http": { - "method": "GET", - "uri": "/projects/{projectName}/placements", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "items": "placements", - "pageSize": "maxResults" - } - } - }, - "com.amazonaws.iot1clickprojects#ListPlacementsRequest": { - "type": "structure", - "members": { - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The project containing the placements to be listed.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "nextToken": { - "target": "com.amazonaws.iot1clickprojects#NextToken", - "traits": { - "smithy.api#documentation": "
The token to retrieve the next set of results.
", - "smithy.api#httpQuery": "nextToken" - } - }, - "maxResults": { - "target": "com.amazonaws.iot1clickprojects#MaxResults", - "traits": { - "smithy.api#documentation": "
The maximum number of results to return per request. If not set, a default value of 100 is\n used.
", - "smithy.api#httpQuery": "maxResults" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#ListPlacementsResponse": { - "type": "structure", - "members": { - "placements": { - "target": "com.amazonaws.iot1clickprojects#PlacementSummaryList", - "traits": { - "smithy.api#documentation": "
An object listing the requested placements.
", - "smithy.api#required": {} - } - }, - "nextToken": { - "target": "com.amazonaws.iot1clickprojects#NextToken", - "traits": { - "smithy.api#documentation": "
The token used to retrieve the next set of results - will be effectively empty if there\n are no further results.
" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#ListProjects": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#ListProjectsRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#ListProjectsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - } - ], - "traits": { - "smithy.api#documentation": "
Lists the AWS IoT 1-Click project(s) associated with your AWS account and region.
", - "smithy.api#http": { - "method": "GET", - "uri": "/projects", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "items": "projects", - "pageSize": "maxResults" - } - } - }, - "com.amazonaws.iot1clickprojects#ListProjectsRequest": { - "type": "structure", - "members": { - "nextToken": { - "target": "com.amazonaws.iot1clickprojects#NextToken", - "traits": { - "smithy.api#documentation": "
The token to retrieve the next set of results.
", - "smithy.api#httpQuery": "nextToken" - } - }, - "maxResults": { - "target": "com.amazonaws.iot1clickprojects#MaxResults", - "traits": { - "smithy.api#documentation": "
The maximum number of results to return per request. If not set, a default value of 100 is\n used.
", - "smithy.api#httpQuery": "maxResults" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#ListProjectsResponse": { - "type": "structure", - "members": { - "projects": { - "target": "com.amazonaws.iot1clickprojects#ProjectSummaryList", - "traits": { - "smithy.api#documentation": "
An object containing the list of projects.
", - "smithy.api#required": {} - } - }, - "nextToken": { - "target": "com.amazonaws.iot1clickprojects#NextToken", - "traits": { - "smithy.api#documentation": "
The token used to retrieve the next set of results - will be effectively empty if there\n are no further results.
" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#ListTagsForResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#ListTagsForResourceRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#ListTagsForResourceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "
Lists the tags (metadata key/value pairs) which you have assigned to the resource.
", - "smithy.api#http": { - "method": "GET", - "uri": "/tags/{resourceArn}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#ListTagsForResourceRequest": { - "type": "structure", - "members": { - "resourceArn": { - "target": "com.amazonaws.iot1clickprojects#ProjectArn", - "traits": { - "smithy.api#documentation": "
The ARN of the resource whose tags you want to list.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#ListTagsForResourceResponse": { - "type": "structure", - "members": { - "tags": { - "target": "com.amazonaws.iot1clickprojects#TagMap", - "traits": { - "smithy.api#documentation": "
The tags (metadata key/value pairs) which you have assigned to the resource.
" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#MaxResults": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1, - "max": 250 - } - } - }, - "com.amazonaws.iot1clickprojects#Message": { - "type": "string" - }, - "com.amazonaws.iot1clickprojects#NextToken": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 1024 - } - } - }, - "com.amazonaws.iot1clickprojects#PlacementAttributeMap": { - "type": "map", - "key": { - "target": "com.amazonaws.iot1clickprojects#AttributeName" - }, - "value": { - "target": "com.amazonaws.iot1clickprojects#AttributeValue" - } - }, - "com.amazonaws.iot1clickprojects#PlacementDescription": { - "type": "structure", - "members": { - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project containing the placement.
", - "smithy.api#required": {} - } - }, - "placementName": { - "target": "com.amazonaws.iot1clickprojects#PlacementName", - "traits": { - "smithy.api#documentation": "
The name of the placement.
", - "smithy.api#required": {} - } - }, - "attributes": { - "target": "com.amazonaws.iot1clickprojects#PlacementAttributeMap", - "traits": { - "smithy.api#documentation": "
The user-defined attributes associated with the placement.
", - "smithy.api#required": {} - } - }, - "createdDate": { - "target": "com.amazonaws.iot1clickprojects#Time", - "traits": { - "smithy.api#documentation": "
The date when the placement was initially created, in UNIX epoch time format.
", - "smithy.api#required": {} - } - }, - "updatedDate": { - "target": "com.amazonaws.iot1clickprojects#Time", - "traits": { - "smithy.api#documentation": "
The date when the placement was last updated, in UNIX epoch time format. If the placement\n was not updated, then createdDate and updatedDate are the\n same.
", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "
An object describing a project's placement.
" - } - }, - "com.amazonaws.iot1clickprojects#PlacementName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 128 - }, - "smithy.api#pattern": "^[a-zA-Z0-9_-]+$" - } - }, - "com.amazonaws.iot1clickprojects#PlacementSummary": { - "type": "structure", - "members": { - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project containing the placement.
", - "smithy.api#required": {} - } - }, - "placementName": { - "target": "com.amazonaws.iot1clickprojects#PlacementName", - "traits": { - "smithy.api#documentation": "
The name of the placement being summarized.
", - "smithy.api#required": {} - } - }, - "createdDate": { - "target": "com.amazonaws.iot1clickprojects#Time", - "traits": { - "smithy.api#documentation": "
The date when the placement was originally created, in UNIX epoch time format.
", - "smithy.api#required": {} - } - }, - "updatedDate": { - "target": "com.amazonaws.iot1clickprojects#Time", - "traits": { - "smithy.api#documentation": "
The date when the placement was last updated, in UNIX epoch time format. If the placement\n was not updated, then createdDate and updatedDate are the\n same.
", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "
An object providing summary information for a particular placement.
" - } - }, - "com.amazonaws.iot1clickprojects#PlacementSummaryList": { - "type": "list", - "member": { - "target": "com.amazonaws.iot1clickprojects#PlacementSummary" - } - }, - "com.amazonaws.iot1clickprojects#PlacementTemplate": { - "type": "structure", - "members": { - "defaultAttributes": { - "target": "com.amazonaws.iot1clickprojects#DefaultPlacementAttributeMap", - "traits": { - "smithy.api#documentation": "
The default attributes (key/value pairs) to be applied to all placements using this\n template.
" - } - }, - "deviceTemplates": { - "target": "com.amazonaws.iot1clickprojects#DeviceTemplateMap", - "traits": { - "smithy.api#documentation": "
An object specifying the DeviceTemplate for all placements using this\n (PlacementTemplate) template.
" - } - } - }, - "traits": { - "smithy.api#documentation": "
An object defining the template for a placement.
" - } - }, - "com.amazonaws.iot1clickprojects#ProjectArn": { - "type": "string", - "traits": { - "smithy.api#pattern": "^arn:aws:iot1click:[A-Za-z0-9_/.-]{0,63}:\\d+:projects/[0-9A-Za-z_-]{1,128}$" - } - }, - "com.amazonaws.iot1clickprojects#ProjectDescription": { - "type": "structure", - "members": { - "arn": { - "target": "com.amazonaws.iot1clickprojects#ProjectArn", - "traits": { - "smithy.api#documentation": "
The ARN of the project.
" - } - }, - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project about which to obtain information.
", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.iot1clickprojects#Description", - "traits": { - "smithy.api#documentation": "
The description of the project.
" - } - }, - "createdDate": { - "target": "com.amazonaws.iot1clickprojects#Time", - "traits": { - "smithy.api#documentation": "
The date when the project was originally created, in UNIX epoch time format.
", - "smithy.api#required": {} - } - }, - "updatedDate": { - "target": "com.amazonaws.iot1clickprojects#Time", - "traits": { - "smithy.api#documentation": "
The date when the project was last updated, in UNIX epoch time format. If the project was\n not updated, then createdDate and updatedDate are the same.
", - "smithy.api#required": {} - } - }, - "placementTemplate": { - "target": "com.amazonaws.iot1clickprojects#PlacementTemplate", - "traits": { - "smithy.api#documentation": "
An object describing the project's placement specifications.
" - } - }, - "tags": { - "target": "com.amazonaws.iot1clickprojects#TagMap", - "traits": { - "smithy.api#documentation": "
The tags (metadata key/value pairs) associated with the project.
" - } - } - }, - "traits": { - "smithy.api#documentation": "
An object providing detailed information for a particular project associated with an AWS\n account and region.
" - } - }, - "com.amazonaws.iot1clickprojects#ProjectName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 128 - }, - "smithy.api#pattern": "^[0-9A-Za-z_-]+$" - } - }, - "com.amazonaws.iot1clickprojects#ProjectSummary": { - "type": "structure", - "members": { - "arn": { - "target": "com.amazonaws.iot1clickprojects#ProjectArn", - "traits": { - "smithy.api#documentation": "
The ARN of the project.
" - } - }, - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project being summarized.
", - "smithy.api#required": {} - } - }, - "createdDate": { - "target": "com.amazonaws.iot1clickprojects#Time", - "traits": { - "smithy.api#documentation": "
The date when the project was originally created, in UNIX epoch time format.
", - "smithy.api#required": {} - } - }, - "updatedDate": { - "target": "com.amazonaws.iot1clickprojects#Time", - "traits": { - "smithy.api#documentation": "
The date when the project was last updated, in UNIX epoch time format. If the project was\n not updated, then createdDate and updatedDate are the same.
", - "smithy.api#required": {} - } - }, - "tags": { - "target": "com.amazonaws.iot1clickprojects#TagMap", - "traits": { - "smithy.api#documentation": "
The tags (metadata key/value pairs) associated with the project.
" - } - } - }, - "traits": { - "smithy.api#documentation": "
An object providing summary information for a particular project for an associated AWS\n account and region.
" - } - }, - "com.amazonaws.iot1clickprojects#ProjectSummaryList": { - "type": "list", - "member": { - "target": "com.amazonaws.iot1clickprojects#ProjectSummary" - } - }, - "com.amazonaws.iot1clickprojects#ResourceConflictException": { - "type": "structure", - "members": { - "code": { - "target": "com.amazonaws.iot1clickprojects#Code", - "traits": { - "smithy.api#required": {} - } - }, - "message": { - "target": "com.amazonaws.iot1clickprojects#Message", - "traits": { - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "
", - "smithy.api#error": "client", - "smithy.api#httpError": 409 - } - }, - "com.amazonaws.iot1clickprojects#ResourceNotFoundException": { - "type": "structure", - "members": { - "code": { - "target": "com.amazonaws.iot1clickprojects#Code", - "traits": { - "smithy.api#required": {} - } - }, - "message": { - "target": "com.amazonaws.iot1clickprojects#Message", - "traits": { - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "
", - "smithy.api#error": "client", - "smithy.api#httpError": 404 - } - }, - "com.amazonaws.iot1clickprojects#TagKey": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 128 - }, - "smithy.api#pattern": "^(?!aws:)[a-zA-Z+-=._:/]+$" - } - }, - "com.amazonaws.iot1clickprojects#TagKeyList": { - "type": "list", - "member": { - "target": "com.amazonaws.iot1clickprojects#TagKey" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 50 - } - } - }, - "com.amazonaws.iot1clickprojects#TagMap": { - "type": "map", - "key": { - "target": "com.amazonaws.iot1clickprojects#TagKey" - }, - "value": { - "target": "com.amazonaws.iot1clickprojects#TagValue" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 50 - } - } - }, - "com.amazonaws.iot1clickprojects#TagResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#TagResourceRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#TagResourceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "
Creates or modifies tags for a resource. Tags are key/value pairs (metadata) that can be\n used to manage a resource. For more information, see AWS Tagging\n Strategies.
", - "smithy.api#http": { - "method": "POST", - "uri": "/tags/{resourceArn}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#TagResourceRequest": { - "type": "structure", - "members": { - "resourceArn": { - "target": "com.amazonaws.iot1clickprojects#ProjectArn", - "traits": { - "smithy.api#documentation": "
The ARN of the resource for which tag(s) should be added or modified.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tags": { - "target": "com.amazonaws.iot1clickprojects#TagMap", - "traits": { - "smithy.api#documentation": "
The new or modified tag(s) for the resource. See AWS IoT 1-Click Service Limits for the maximum number of tags allowed per\n resource.
", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#TagResourceResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#TagValue": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 256 - } - } - }, - "com.amazonaws.iot1clickprojects#Time": { - "type": "timestamp" - }, - "com.amazonaws.iot1clickprojects#TooManyRequestsException": { - "type": "structure", - "members": { - "code": { - "target": "com.amazonaws.iot1clickprojects#Code", - "traits": { - "smithy.api#required": {} - } - }, - "message": { - "target": "com.amazonaws.iot1clickprojects#Message", - "traits": { - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "
", - "smithy.api#error": "client", - "smithy.api#httpError": 429 - } - }, - "com.amazonaws.iot1clickprojects#UntagResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#UntagResourceRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#UntagResourceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "
Removes one or more tags (metadata key/value pairs) from a resource.
", - "smithy.api#http": { - "method": "DELETE", - "uri": "/tags/{resourceArn}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#UntagResourceRequest": { - "type": "structure", - "members": { - "resourceArn": { - "target": "com.amazonaws.iot1clickprojects#ProjectArn", - "traits": { - "smithy.api#documentation": "
The ARN of the resource whose tag you want to remove.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tagKeys": { - "target": "com.amazonaws.iot1clickprojects#TagKeyList", - "traits": { - "smithy.api#documentation": "
The keys of the tags that you want to remove.
", - "smithy.api#httpQuery": "tagKeys", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#UntagResourceResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#UpdatePlacement": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#UpdatePlacementRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#UpdatePlacementResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.iot1clickprojects#TooManyRequestsException" - } - ], - "traits": { - "smithy.api#documentation": "
Updates a placement with the given attributes. To clear an attribute, pass an empty value\n (i.e., \"\").
", - "smithy.api#http": { - "method": "PUT", - "uri": "/projects/{projectName}/placements/{placementName}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#UpdatePlacementRequest": { - "type": "structure", - "members": { - "placementName": { - "target": "com.amazonaws.iot1clickprojects#PlacementName", - "traits": { - "smithy.api#documentation": "
The name of the placement to update.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project containing the placement to be updated.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "attributes": { - "target": "com.amazonaws.iot1clickprojects#PlacementAttributeMap", - "traits": { - "smithy.api#documentation": "
The user-defined object of attributes used to update the placement. The maximum number of\n key/value pairs is 50.
" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#UpdatePlacementResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.iot1clickprojects#UpdateProject": { - "type": "operation", - "input": { - "target": "com.amazonaws.iot1clickprojects#UpdateProjectRequest" - }, - "output": { - "target": "com.amazonaws.iot1clickprojects#UpdateProjectResponse" - }, - "errors": [ - { - "target": "com.amazonaws.iot1clickprojects#InternalFailureException" - }, - { - "target": "com.amazonaws.iot1clickprojects#InvalidRequestException" - }, - { - "target": "com.amazonaws.iot1clickprojects#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.iot1clickprojects#TooManyRequestsException" - } - ], - "traits": { - "smithy.api#documentation": "
Updates a project associated with your AWS account and region. With the exception of\n device template names, you can pass just the values that need to be updated because the update\n request will change only the values that are provided. To clear a value, pass the empty string\n (i.e., \"\").
", - "smithy.api#http": { - "method": "PUT", - "uri": "/projects/{projectName}", - "code": 200 - } - } - }, - "com.amazonaws.iot1clickprojects#UpdateProjectRequest": { - "type": "structure", - "members": { - "projectName": { - "target": "com.amazonaws.iot1clickprojects#ProjectName", - "traits": { - "smithy.api#documentation": "
The name of the project to be updated.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.iot1clickprojects#Description", - "traits": { - "smithy.api#documentation": "
An optional user-defined description for the project.
" - } - }, - "placementTemplate": { - "target": "com.amazonaws.iot1clickprojects#PlacementTemplate", - "traits": { - "smithy.api#documentation": "
An object defining the project update. Once a project has been created, you cannot add\n device template names to the project. However, for a given placementTemplate, you\n can update the associated callbackOverrides for the device definition using this\n API.
" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.iot1clickprojects#UpdateProjectResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - } - } -} diff --git a/models/iot.json b/models/iot.json index 5c67493490..a9c2ad09c8 100644 --- a/models/iot.json +++ b/models/iot.json @@ -468,6 +468,9 @@ { "target": "com.amazonaws.iot#GetStatistics" }, + { + "target": "com.amazonaws.iot#GetThingConnectivityData" + }, { "target": "com.amazonaws.iot#GetTopicRule" }, @@ -6890,6 +6893,17 @@ "smithy.api#pattern": "^[a-zA-Z0-9:.]+$" } }, + "com.amazonaws.iot#ConnectivityApiThingName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[a-zA-Z0-9:_-]+$", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.iot#ConnectivityTimestamp": { "type": "long" }, @@ -7492,7 +7506,7 @@ "roleArn": { "target": "com.amazonaws.iot#RoleArn", "traits": { - "smithy.api#documentation": "
The IAM role that allows access to create the command.
" + "smithy.api#documentation": "
The IAM role that you must provide when using the AWS-IoT-FleetWise namespace.\n The role grants IoT Device Management the permission to access IoT FleetWise resources \n for generating the payload for the command. This field is not required when you use the\n AWS-IoT namespace.
" } }, "tags": { @@ -16802,6 +16816,95 @@ "com.amazonaws.iot#DisconnectReason": { "type": "string" }, + "com.amazonaws.iot#DisconnectReasonValue": { + "type": "enum", + "members": { + "AUTH_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUTH_ERROR" + } + }, + "CLIENT_INITIATED_DISCONNECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLIENT_INITIATED_DISCONNECT" + } + }, + "CLIENT_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLIENT_ERROR" + } + }, + "CONNECTION_LOST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONNECTION_LOST" + } + }, + "DUPLICATE_CLIENTID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DUPLICATE_CLIENTID" + } + }, + "FORBIDDEN_ACCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FORBIDDEN_ACCESS" + } + }, + "MQTT_KEEP_ALIVE_TIMEOUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MQTT_KEEP_ALIVE_TIMEOUT" + } + }, + "SERVER_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVER_ERROR" + } + }, + "SERVER_INITIATED_DISCONNECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVER_INITIATED_DISCONNECT" + } + }, + "THROTTLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "THROTTLED" + } + }, + "WEBSOCKET_TTL_EXPIRATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WEBSOCKET_TTL_EXPIRATION" + } + }, + "CUSTOMAUTH_TTL_EXPIRATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOMAUTH_TTL_EXPIRATION" + } + }, + "UNKNOWN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNKNOWN" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, "com.amazonaws.iot#DisplayName": { "type": "string", "traits": { @@ -18414,7 +18517,7 @@ "timeToLive": { "target": "com.amazonaws.iot#DateType", "traits": { - "smithy.api#documentation": "
The time to live (TTL) parameter for the GetCommandExecution API.
" + "smithy.api#documentation": "
The time to live (TTL) parameter that indicates the duration for which executions will\n be retained in your account. The default value is six months.
" } } }, @@ -18486,7 +18589,7 @@ "roleArn": { "target": "com.amazonaws.iot#RoleArn", "traits": { - "smithy.api#documentation": "
The IAM role that allows access to retrieve information about the command.
" + "smithy.api#documentation": "
The IAM role that you provided when creating the command with AWS-IoT-FleetWise\n as the namespace.
" } }, "createdAt": { @@ -19605,6 +19708,94 @@ "smithy.api#output": {} } }, + "com.amazonaws.iot#GetThingConnectivityData": { + "type": "operation", + "input": { + "target": "com.amazonaws.iot#GetThingConnectivityDataRequest" + }, + "output": { + "target": "com.amazonaws.iot#GetThingConnectivityDataResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iot#IndexNotReadyException" + }, + { + "target": "com.amazonaws.iot#InternalFailureException" + }, + { + "target": "com.amazonaws.iot#InvalidRequestException" + }, + { + "target": "com.amazonaws.iot#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iot#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.iot#ThrottlingException" + }, + { + "target": "com.amazonaws.iot#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "
Retrieves the live connectivity status per device.
", + "smithy.api#http": { + "method": "POST", + "uri": "/things/{thingName}/connectivity-data", + "code": 200 + } + } + }, + "com.amazonaws.iot#GetThingConnectivityDataRequest": { + "type": "structure", + "members": { + "thingName": { + "target": "com.amazonaws.iot#ConnectivityApiThingName", + "traits": { + "smithy.api#documentation": "
The name of your IoT thing.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.iot#GetThingConnectivityDataResponse": { + "type": "structure", + "members": { + "thingName": { + "target": "com.amazonaws.iot#ConnectivityApiThingName", + "traits": { + "smithy.api#documentation": "
The name of your IoT thing.
" + } + }, + "connected": { + "target": "com.amazonaws.iot#Boolean", + "traits": { + "smithy.api#documentation": "
A Boolean that indicates the connectivity status.
" + } + }, + "timestamp": { + "target": "com.amazonaws.iot#Timestamp", + "traits": { + "smithy.api#documentation": "
The timestamp of when the event occurred.
" + } + }, + "disconnectReason": { + "target": "com.amazonaws.iot#DisconnectReasonValue", + "traits": { + "smithy.api#documentation": "
The reason why the client is disconnecting.
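(Reviewer aside, not part of the model diff: the GetThingConnectivityData shapes above should surface through Soto's usual generated Swift API. A minimal usage sketch follows; the field names come from the Smithy model, while the convenience-method and initializer spellings are assumptions based on Soto's standard generator output and are not confirmed by this diff.)

```swift
import SotoIoT

// Sketch only: assumes Soto's standard generated surface for the new
// GetThingConnectivityData operation (POST /things/{thingName}/connectivity-data).
let client = AWSClient()
let iot = IoT(client: client, region: .useast1)

// thingName must match ^[a-zA-Z0-9:_-]+$ and be 1-128 characters long, per the
// new ConnectivityApiThingName shape (which is marked sensitive in the model).
let data = try await iot.getThingConnectivityData(thingName: "my-thing")
print(data.connected ?? false, data.disconnectReason?.rawValue ?? "NONE")

try await client.shutdown()
```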
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.iot#GetTopicRule": { "type": "operation", "input": { @@ -22712,7 +22903,7 @@ } ], "traits": { - "smithy.api#documentation": "
List all command executions. Note: You must provide only the\n startedTimeFilter or the completedTimeFilter information. If you \n provide both time filters, the API will generate an error.\n You can use this information to find command executions that started within\n a specific timeframe.
", + "smithy.api#documentation": "
List all command executions. Note: • You must provide only the startedTimeFilter or \n the completedTimeFilter information. If you provide \n both time filters, the API will generate an error. You can use \n this information to retrieve a list of command executions \n within a specific timeframe. • You must provide only the commandArn or \n the thingArn information depending on whether you want\n to list executions for a specific command or an IoT thing. If you provide \n both fields, the API will generate an error. For more information about considerations for using this API, see\n List\n command executions in your account (CLI).
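(Aside: the rewritten documentation above encodes two mutual-exclusivity rules. A hedged sketch of the call site, reusing the `iot` client from the earlier sketch; the method and parameter spellings are assumed from the member names quoted in the doc text, and the TimeFilter shape is not shown in this diff.)

```swift
// Pick one of commandArn / thingArn, and one of startedTimeFilter /
// completedTimeFilter; supplying both members of either pair is rejected.
let executions = try await iot.listCommandExecutions(
    thingArn: "arn:aws:iot:us-east-1:123456789012:thing/my-thing"
)
```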
", "smithy.api#http": { "method": "POST", "uri": "/command-executions", diff --git a/models/iotsecuretunneling.json b/models/iotsecuretunneling.json index b6bf5c9981..2fd0ad123a 100644 --- a/models/iotsecuretunneling.json +++ b/models/iotsecuretunneling.json @@ -459,6 +459,81 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://api.iot-tunneling-fips.{Region}.api.aws", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-cn", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://api.iot-tunneling-fips.{Region}.api.amazonwebservices.com.cn", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://api.iot-tunneling-fips.{Region}.api.aws", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { @@ -563,6 +638,81 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://api.iot-tunneling.{Region}.api.aws", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-cn", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://api.iot-tunneling.{Region}.api.amazonwebservices.com.cn", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://api.iot-tunneling.{Region}.api.aws", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { @@ -620,6 +770,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.ap-east-1.api.aws" + } + }, + "params": { + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -633,6 +796,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.ap-northeast-1.api.aws" + } + }, + "params": { + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { @@ -646,6 +822,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.ap-northeast-2.api.aws" + } + }, + "params": { + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": 
"For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { @@ -659,6 +848,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.ap-south-1.api.aws" + } + }, + "params": { + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -672,6 +874,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.ap-southeast-1.api.aws" + } + }, + "params": { + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { @@ -685,6 +900,19 @@ "UseDualStack": false } }, + { + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.ap-southeast-2.api.aws" + } + }, + "params": { + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { @@ -711,6 +939,32 @@ "UseDualStack": false } }, + { + "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.ca-central-1.api.aws" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling-fips.ca-central-1.api.aws" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { @@ -724,6 +978,19 @@ "UseDualStack": false } }, + { + "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.eu-central-1.api.aws" + } + }, + "params": { + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { @@ -737,6 +1004,19 @@ "UseDualStack": false } }, + { + "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.eu-north-1.api.aws" + } + }, + "params": { + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { @@ -750,6 +1030,19 @@ "UseDualStack": false } }, + { + "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.eu-west-1.api.aws" + } + }, + "params": { + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { @@ -763,6 +1056,19 @@ "UseDualStack": false } }, + { + "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": 
"https://api.iot-tunneling.eu-west-2.api.aws" + } + }, + "params": { + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { @@ -776,6 +1082,45 @@ "UseDualStack": false } }, + { + "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.eu-west-3.api.aws" + } + }, + "params": { + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://api.tunneling.iot.me-central-1.amazonaws.com" + } + }, + "params": { + "Region": "me-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.me-central-1.api.aws" + } + }, + "params": { + "Region": "me-central-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { @@ -789,6 +1134,19 @@ "UseDualStack": false } }, + { + "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.me-south-1.api.aws" + } + }, + "params": { + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -802,6 +1160,19 @@ "UseDualStack": false } }, + { + "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.sa-east-1.api.aws" + } + }, + "params": { + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -828,6 +1199,32 @@ "UseDualStack": false } }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { @@ -854,6 +1251,32 @@ "UseDualStack": false } }, + { + "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.us-east-2.api.aws" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling-fips.us-east-2.api.aws" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { @@ -880,6 +1303,32 @@ "UseDualStack": false } }, + { + "documentation": "For region 
us-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.us-west-1.api.aws" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling-fips.us-west-1.api.aws" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { @@ -907,28 +1356,28 @@ } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.tunneling.iot-fips.us-east-1.api.aws" + "url": "https://api.iot-tunneling.us-west-2.api.aws" } }, "params": { - "Region": "us-east-1", - "UseFIPS": true, + "Region": "us-west-2", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.tunneling.iot.us-east-1.api.aws" + "url": "https://api.iot-tunneling-fips.us-west-2.api.aws" } }, "params": { - "Region": "us-east-1", - "UseFIPS": false, + "Region": "us-west-2", + "UseFIPS": true, "UseDualStack": true } }, @@ -945,6 +1394,19 @@ "UseDualStack": false } }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { @@ -959,42 +1421,42 @@ } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.tunneling.iot-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://api.iot-tunneling.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", - "UseFIPS": true, + "Region": "cn-northwest-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.tunneling.iot-fips.cn-north-1.amazonaws.com.cn" + "url": "https://api.iot-tunneling-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api.tunneling.iot.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://api.tunneling.iot-fips.cn-north-1.amazonaws.com.cn" } }, "params": { "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -1023,6 +1485,32 @@ "UseDualStack": false } }, + { + "documentation": "For region us-gov-east-1 with FIPS 
disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://api.iot-tunneling-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { @@ -1050,40 +1538,42 @@ } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.tunneling.iot-fips.us-gov-east-1.api.aws" + "url": "https://api.iot-tunneling.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, + "Region": "us-gov-west-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://api.tunneling.iot.us-gov-east-1.api.aws" + "url": "https://api.iot-tunneling-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, + "Region": "us-gov-west-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://api.tunneling.iot.us-iso-east-1.c2s.ic.gov" + } }, "params": { "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true + "UseFIPS": false, + "UseDualStack": false } }, { @@ -1100,38 +1590,38 @@ } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { "Region": "us-iso-east-1", - "UseFIPS": false, + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://api.tunneling.iot.us-iso-east-1.c2s.ic.gov" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://api.tunneling.iot.us-isob-east-1.sc2s.sgov.gov" + } }, "params": { "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": true + "UseFIPS": false, + "UseDualStack": false } }, { @@ 
-1148,27 +1638,25 @@ } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { "Region": "us-isob-east-1", - "UseFIPS": false, + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://api.tunneling.iot.us-isob-east-1.sc2s.sgov.gov" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { diff --git a/models/iotsitewise.json b/models/iotsitewise.json index cdfbb572c3..0adcbb1c46 100644 --- a/models/iotsitewise.json +++ b/models/iotsitewise.json @@ -4293,6 +4293,12 @@ "com.amazonaws.iotsitewise#BatchPutAssetPropertyValueRequest": { "type": "structure", "members": { + "enablePartialEntryProcessing": { + "target": "com.amazonaws.iotsitewise#BooleanValue", + "traits": { + "smithy.api#documentation": "

This setting enables entry-level partial ingestion. If set to true, all TQVs in an entry that do not result in an error are ingested. If set to false, an invalid TQV fails ingestion of the entire entry that contains it.

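For reviewers, here is a minimal sketch of what this flag enables once Soto is regenerated from this model. The member and shape names follow the model, but the generated Swift signatures are assumed, and the alias and values are placeholders:

```swift
import Foundation
import SotoIoTSiteWise

let client = AWSClient()
let siteWise = IoTSiteWise(client: client, region: .useast1)

// One entry with a single GOOD-quality double TQV (timestamp-quality-value).
let entry = IoTSiteWise.PutAssetPropertyValueEntry(
    entryId: "temp-1",
    propertyAlias: "/factory/line1/temperature",  // placeholder alias
    propertyValues: [
        .init(
            quality: .good,
            timestamp: .init(timeInSeconds: Int64(Date().timeIntervalSince1970)),
            value: .init(doubleValue: 21.5)
        )
    ]
)

// With the new flag set, valid TQVs are ingested even if another TQV in the
// same entry is rejected; errorEntries still reports the failures.
let response = try await siteWise.batchPutAssetPropertyValue(
    enablePartialEntryProcessing: true,  // new member from this model change
    entries: [entry]
)
print(response.errorEntries)
try await client.shutdown()
```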
" + } + }, "entries": { "target": "com.amazonaws.iotsitewise#PutAssetPropertyValueEntries", "traits": { @@ -4320,6 +4326,9 @@ "smithy.api#output": {} } }, + "com.amazonaws.iotsitewise#BooleanValue": { + "type": "boolean" + }, "com.amazonaws.iotsitewise#Bucket": { "type": "string", "traits": { @@ -9183,6 +9192,12 @@ "traits": { "smithy.api#documentation": "

Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.

" } + }, + "disallowIngestNullNaN": { + "target": "com.amazonaws.iotsitewise#DisallowIngestNullNaN", + "traits": { + "smithy.api#documentation": "

Describes the configuration for ingesting NULL and NaN data. By default, ingestion of NULL and NaN values is allowed; it is disallowed when this value is true.

" + } } }, "traits": { @@ -9377,6 +9392,9 @@ "target": "com.amazonaws.iotsitewise#DetailedError" } }, + "com.amazonaws.iotsitewise#DisallowIngestNullNaN": { + "type": "boolean" + }, "com.amazonaws.iotsitewise#DisassociateAssets": { "type": "operation", "input": { @@ -13454,12 +13472,7 @@ "type": "boolean" }, "com.amazonaws.iotsitewise#NumberOfDays": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 30 - } - } + "type": "integer" }, "com.amazonaws.iotsitewise#Offset": { "type": "string", @@ -14009,6 +14022,21 @@ "com.amazonaws.iotsitewise#PropertyValueIntegerValue": { "type": "integer" }, + "com.amazonaws.iotsitewise#PropertyValueNullValue": { + "type": "structure", + "members": { + "valueType": { + "target": "com.amazonaws.iotsitewise#RawValueType", + "traits": { + "smithy.api#documentation": "

The type of null asset property data.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The value type of null asset property data with BAD and UNCERTAIN qualities.

" + } + }, "com.amazonaws.iotsitewise#PropertyValueStringValue": { "type": "string" }, @@ -14282,6 +14310,12 @@ "traits": { "smithy.api#documentation": "

Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.

" } + }, + "disallowIngestNullNaN": { + "target": "com.amazonaws.iotsitewise#DisallowIngestNullNaN", + "traits": { + "smithy.api#documentation": "

Describes the configuration for ingesting NULL and NaN data. By default, ingestion of NULL and NaN values is allowed; it is disallowed when this value is true.

" + } } }, "traits": { @@ -14330,6 +14364,12 @@ "traits": { "smithy.api#documentation": "

Set this period to specify how long your data is stored in the warm tier before it is deleted. You can set this only if cold tier is enabled.

" } + }, + "disallowIngestNullNaN": { + "target": "com.amazonaws.iotsitewise#DisallowIngestNullNaN", + "traits": { + "smithy.api#documentation": "

Describes the configuration for ingesting NULL and NaN data. By default, ingestion of NULL and NaN values is allowed; it is disallowed when this value is true.

" + } } }, "traits": { @@ -14393,6 +14433,41 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.iotsitewise#RawValueType": { + "type": "enum", + "members": { + "DOUBLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "D" + } + }, + "BOOLEAN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "B" + } + }, + "STRING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "S" + } + }, + "INTEGER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "I" + } + }, + "UNKNOWN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "U" + } + } + } + }, "com.amazonaws.iotsitewise#Reference": { "type": "structure", "members": { @@ -16455,7 +16530,7 @@ "stringValue": { "target": "com.amazonaws.iotsitewise#PropertyValueStringValue", "traits": { - "smithy.api#documentation": "

Asset property data of type string (sequence of characters).

" + "smithy.api#documentation": "

Asset property data of type string (sequence of characters). The allowed pattern is \"^$|[^\\u0000-\\u001F\\u007F]+\". The maximum length is 1024.

" } }, "integerValue": { @@ -16467,7 +16542,7 @@ "doubleValue": { "target": "com.amazonaws.iotsitewise#PropertyValueDoubleValue", "traits": { - "smithy.api#documentation": "

Asset property data of type double (floating point number).

" + "smithy.api#documentation": "

Asset property data of type double (floating-point number). The minimum value is -10^10 and the maximum value is 10^10. Double.NaN is allowed.

" } }, "booleanValue": { @@ -16475,6 +16550,12 @@ "traits": { "smithy.api#documentation": "

Asset property data of type Boolean (true or false).

" } + }, + "nullValue": { + "target": "com.amazonaws.iotsitewise#PropertyValueNullValue", + "traits": { + "smithy.api#documentation": "

The type of null asset property data with BAD and UNCERTAIN qualities.

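Together, PropertyValueNullValue and RawValueType let a producer record a BAD or UNCERTAIN sample whose value is absent, instead of fabricating a sentinel number. A sketch under the same assumptions as above (regenerated Soto shapes; the enum case name is assumed from the model's "D" value):

```swift
import Foundation
import SotoIoTSiteWise

// A BAD-quality sample with no usable reading: the variant carries only the
// declared raw type of the missing value (here "D" for double).
let nullSample = IoTSiteWise.AssetPropertyValue(
    quality: .bad,
    timestamp: .init(timeInSeconds: Int64(Date().timeIntervalSince1970)),
    value: .init(nullValue: .init(valueType: .double))
)
```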
" + } } }, "traits": { diff --git a/models/kafkaconnect.json b/models/kafkaconnect.json index de1e5fec32..2d3b20d2c5 100644 --- a/models/kafkaconnect.json +++ b/models/kafkaconnect.json @@ -47,7 +47,7 @@ "type": "structure", "members": { "maxWorkerCount": { - "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "target": "com.amazonaws.kafkaconnect#__integer", "traits": { "smithy.api#default": 0, "smithy.api#documentation": "

The maximum number of workers allocated to the connector.

", @@ -63,7 +63,7 @@ } }, "minWorkerCount": { - "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "target": "com.amazonaws.kafkaconnect#__integer", "traits": { "smithy.api#default": 0, "smithy.api#documentation": "

The minimum number of workers allocated to the connector.

", @@ -132,7 +132,7 @@ "type": "structure", "members": { "maxWorkerCount": { - "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "target": "com.amazonaws.kafkaconnect#__integer", "traits": { "smithy.api#default": 0, "smithy.api#documentation": "

The target maximum number of workers allocated to the connector.

", @@ -148,7 +148,7 @@ } }, "minWorkerCount": { - "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "target": "com.amazonaws.kafkaconnect#__integer", "traits": { "smithy.api#default": 0, "smithy.api#documentation": "

The target minimum number of workers allocated to the connector.

", @@ -303,6 +303,200 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.kafkaconnect#ConnectorConfiguration": { + "type": "map", + "key": { + "target": "com.amazonaws.kafkaconnect#__string" + }, + "value": { + "target": "com.amazonaws.kafkaconnect#__string" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.kafkaconnect#ConnectorConfigurationUpdate": { + "type": "map", + "key": { + "target": "com.amazonaws.kafkaconnect#__string" + }, + "value": { + "target": "com.amazonaws.kafkaconnect#__string" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.kafkaconnect#ConnectorOperationState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PENDING", + "name": "PENDING" + }, + { + "value": "UPDATE_IN_PROGRESS", + "name": "UPDATE_IN_PROGRESS" + }, + { + "value": "UPDATE_COMPLETE", + "name": "UPDATE_COMPLETE" + }, + { + "value": "UPDATE_FAILED", + "name": "UPDATE_FAILED" + }, + { + "value": "ROLLBACK_IN_PROGRESS", + "name": "ROLLBACK_IN_PROGRESS" + }, + { + "value": "ROLLBACK_FAILED", + "name": "ROLLBACK_FAILED" + }, + { + "value": "ROLLBACK_COMPLETE", + "name": "ROLLBACK_COMPLETE" + } + ] + } + }, + "com.amazonaws.kafkaconnect#ConnectorOperationStep": { + "type": "structure", + "members": { + "stepType": { + "target": "com.amazonaws.kafkaconnect#ConnectorOperationStepType", + "traits": { + "smithy.api#documentation": "

The step type of the operation.

" + } + }, + "stepState": { + "target": "com.amazonaws.kafkaconnect#ConnectorOperationStepState", + "traits": { + "smithy.api#documentation": "

The step state of the operation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details of a step that is involved in a connector's operation.

" + } + }, + "com.amazonaws.kafkaconnect#ConnectorOperationStepState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PENDING", + "name": "PENDING" + }, + { + "value": "IN_PROGRESS", + "name": "IN_PROGRESS" + }, + { + "value": "COMPLETED", + "name": "COMPLETED" + }, + { + "value": "FAILED", + "name": "FAILED" + }, + { + "value": "CANCELLED", + "name": "CANCELLED" + } + ] + } + }, + "com.amazonaws.kafkaconnect#ConnectorOperationStepType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "INITIALIZE_UPDATE", + "name": "INITIALIZE_UPDATE" + }, + { + "value": "FINALIZE_UPDATE", + "name": "FINALIZE_UPDATE" + }, + { + "value": "UPDATE_WORKER_SETTING", + "name": "UPDATE_WORKER_SETTING" + }, + { + "value": "UPDATE_CONNECTOR_CONFIGURATION", + "name": "UPDATE_CONNECTOR_CONFIGURATION" + }, + { + "value": "VALIDATE_UPDATE", + "name": "VALIDATE_UPDATE" + } + ] + } + }, + "com.amazonaws.kafkaconnect#ConnectorOperationSummary": { + "type": "structure", + "members": { + "connectorOperationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector operation.

" + } + }, + "connectorOperationType": { + "target": "com.amazonaws.kafkaconnect#ConnectorOperationType", + "traits": { + "smithy.api#documentation": "

The type of connector operation performed.

" + } + }, + "connectorOperationState": { + "target": "com.amazonaws.kafkaconnect#ConnectorOperationState", + "traits": { + "smithy.api#documentation": "

The state of the connector operation.

" + } + }, + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time when the operation was created.

" + } + }, + "endTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time when the operation ended.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A summary of a connector operation.

" + } + }, + "com.amazonaws.kafkaconnect#ConnectorOperationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "UPDATE_WORKER_SETTING", + "name": "UPDATE_WORKER_SETTING" + }, + { + "value": "UPDATE_CONNECTOR_CONFIGURATION", + "name": "UPDATE_CONNECTOR_CONFIGURATION" + }, + { + "value": "ISOLATE_CONNECTOR", + "name": "ISOLATE_CONNECTOR" + }, + { + "value": "RESTORE_CONNECTOR", + "name": "RESTORE_CONNECTOR" + } + ] + } + }, "com.amazonaws.kafkaconnect#ConnectorState": { "type": "string", "traits": { @@ -482,7 +676,7 @@ } }, "connectorConfiguration": { - "target": "com.amazonaws.kafkaconnect#__sensitive__mapOf__string", + "target": "com.amazonaws.kafkaconnect#ConnectorConfiguration", "traits": { "smithy.api#documentation": "

A map of keys to values that represent the configuration for the connector.

", "smithy.api#required": {} @@ -561,6 +755,9 @@ "smithy.api#documentation": "

The tags you want to attach to the connector.

" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#CreateConnectorResponse": { @@ -665,6 +862,9 @@ "smithy.api#documentation": "

The tags you want to attach to the custom plugin.

" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#CreateCustomPluginResponse": { @@ -769,6 +969,9 @@ "smithy.api#documentation": "

The tags you want to attach to the worker configuration.

" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#CreateWorkerConfigurationResponse": { @@ -1094,6 +1297,9 @@ "smithy.api#httpQuery": "currentVersion" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#DeleteConnectorResponse": { @@ -1165,6 +1371,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#DeleteCustomPluginResponse": { @@ -1236,6 +1445,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#DeleteWorkerConfigurationResponse": { @@ -1296,6 +1508,140 @@ "smithy.api#readonly": {} } }, + "com.amazonaws.kafkaconnect#DescribeConnectorOperation": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#DescribeConnectorOperationRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#DescribeConnectorOperationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about the specified connector operation.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/connectorOperations/{connectorOperationArn}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#DescribeConnectorOperationRequest": { + "type": "structure", + "members": { + "connectorOperationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector operation to be described.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.kafkaconnect#DescribeConnectorOperationResponse": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector.

" + } + }, + "connectorOperationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector operation.

" + } + }, + "connectorOperationState": { + "target": "com.amazonaws.kafkaconnect#ConnectorOperationState", + "traits": { + "smithy.api#documentation": "

The state of the connector operation.

" + } + }, + "connectorOperationType": { + "target": "com.amazonaws.kafkaconnect#ConnectorOperationType", + "traits": { + "smithy.api#documentation": "

The type of connector operation performed.

" + } + }, + "operationSteps": { + "target": "com.amazonaws.kafkaconnect#__listOfConnectorOperationStep", + "traits": { + "smithy.api#documentation": "

The array of steps that the operation performed.

" + } + }, + "originWorkerSetting": { + "target": "com.amazonaws.kafkaconnect#WorkerSetting", + "traits": { + "smithy.api#documentation": "

The origin worker setting.

" + } + }, + "originConnectorConfiguration": { + "target": "com.amazonaws.kafkaconnect#ConnectorConfiguration", + "traits": { + "smithy.api#documentation": "

The origin connector configuration.

" + } + }, + "targetWorkerSetting": { + "target": "com.amazonaws.kafkaconnect#WorkerSetting", + "traits": { + "smithy.api#documentation": "

The target worker setting.

" + } + }, + "targetConnectorConfiguration": { + "target": "com.amazonaws.kafkaconnect#ConnectorConfiguration", + "traits": { + "smithy.api#documentation": "

The target connector configuration.

" + } + }, + "errorInfo": { + "target": "com.amazonaws.kafkaconnect#StateDescription" + }, + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time when the operation was created.

" + } + }, + "endTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time when the operation ended.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.kafkaconnect#DescribeConnectorRequest": { "type": "structure", "members": { @@ -1307,6 +1653,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#DescribeConnectorResponse": { @@ -1325,7 +1674,7 @@ } }, "connectorConfiguration": { - "target": "com.amazonaws.kafkaconnect#__sensitive__mapOf__string", + "target": "com.amazonaws.kafkaconnect#ConnectorConfiguration", "traits": { "smithy.api#documentation": "

A map of keys to values that represent the configuration for the connector.

" } @@ -1468,6 +1817,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#DescribeCustomPluginResponse": { @@ -1569,6 +1921,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#DescribeWorkerConfigurationResponse": { @@ -1823,12 +2178,18 @@ { "target": "com.amazonaws.kafkaconnect#DescribeConnector" }, + { + "target": "com.amazonaws.kafkaconnect#DescribeConnectorOperation" + }, { "target": "com.amazonaws.kafkaconnect#DescribeCustomPlugin" }, { "target": "com.amazonaws.kafkaconnect#DescribeWorkerConfiguration" }, + { + "target": "com.amazonaws.kafkaconnect#ListConnectorOperations" + }, { "target": "com.amazonaws.kafkaconnect#ListConnectors" }, @@ -2718,6 +3079,103 @@ } } }, + "com.amazonaws.kafkaconnect#ListConnectorOperations": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#ListConnectorOperationsRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#ListConnectorOperationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists information about a connector's operations.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/connectors/{connectorArn}/operations", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "connectorOperations" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#ListConnectorOperationsRequest": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector for which to list operations.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.kafkaconnect#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of connector operations to list in one response.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

If the response is truncated, it includes a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.kafkaconnect#ListConnectorOperationsResponse": { + "type": "structure", + "members": { + "connectorOperations": { + "target": "com.amazonaws.kafkaconnect#__listOfConnectorOperationSummary", + "traits": { + "smithy.api#documentation": "

An array of connector operation descriptions.

" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

If the response is truncated, it includes a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.kafkaconnect#ListConnectors": { "type": "operation", "input": { @@ -2778,7 +3236,6 @@ "maxResults": { "target": "com.amazonaws.kafkaconnect#MaxResults", "traits": { - "smithy.api#default": 0, "smithy.api#documentation": "

The maximum number of connectors to list in one response.

", "smithy.api#httpQuery": "maxResults" } @@ -2790,6 +3247,9 @@ "smithy.api#httpQuery": "nextToken" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#ListConnectorsResponse": { @@ -2862,7 +3322,6 @@ "maxResults": { "target": "com.amazonaws.kafkaconnect#MaxResults", "traits": { - "smithy.api#default": 0, "smithy.api#documentation": "

The maximum number of custom plugins to list in one response.

", "smithy.api#httpQuery": "maxResults" } @@ -2881,6 +3340,9 @@ "smithy.api#httpQuery": "namePrefix" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#ListCustomPluginsResponse": { @@ -2951,6 +3413,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#ListTagsForResourceResponse": { @@ -3017,7 +3482,6 @@ "maxResults": { "target": "com.amazonaws.kafkaconnect#MaxResults", "traits": { - "smithy.api#default": 0, "smithy.api#documentation": "

The maximum number of worker configurations to list in one response.

", "smithy.api#httpQuery": "maxResults" } @@ -3036,6 +3500,9 @@ "smithy.api#httpQuery": "namePrefix" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#ListWorkerConfigurationsResponse": { @@ -3087,7 +3554,6 @@ "com.amazonaws.kafkaconnect#MaxResults": { "type": "integer", "traits": { - "smithy.api#default": 0, "smithy.api#range": { "min": 1, "max": 100 @@ -3148,7 +3614,7 @@ } }, "workerCount": { - "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "target": "com.amazonaws.kafkaconnect#__integer", "traits": { "smithy.api#default": 0, "smithy.api#documentation": "

The number of workers that are allocated to the connector.

", @@ -3194,7 +3660,7 @@ } }, "workerCount": { - "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "target": "com.amazonaws.kafkaconnect#__integer", "traits": { "smithy.api#default": 0, "smithy.api#documentation": "

The number of workers that are allocated to the connector.

", @@ -3524,6 +3990,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#TagResourceResponse": { @@ -3639,6 +4108,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#UntagResourceResponse": { @@ -3692,8 +4164,13 @@ "capacity": { "target": "com.amazonaws.kafkaconnect#CapacityUpdate", "traits": { - "smithy.api#documentation": "

The target capacity.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The target capacity.

" + } + }, + "connectorConfiguration": { + "target": "com.amazonaws.kafkaconnect#ConnectorConfigurationUpdate", + "traits": { + "smithy.api#documentation": "

A map of keys to values that represent the configuration for the connector.

" } }, "connectorArn": { @@ -3712,6 +4189,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kafkaconnect#UpdateConnectorResponse": { @@ -3728,6 +4208,12 @@ "traits": { "smithy.api#documentation": "

The state of the connector.

" } + }, + "connectorOperationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector operation.

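With capacity now optional on UpdateConnector, a configuration-only update becomes possible, and the response's new connectorOperationArn feeds straight into DescribeConnectorOperation for polling. A rough sketch against a regenerated Soto client — the signatures and enum case names are assumed from the model, and the ARN and version are placeholders:

```swift
import SotoKafkaConnect

let client = AWSClient()
let kafkaConnect = KafkaConnect(client: client, region: .useast1)

// Configuration-only update: capacity may now be omitted.
let update = try await kafkaConnect.updateConnector(
    connectorArn: "arn:aws:kafkaconnect:us-east-1:111122223333:connector/example",  // placeholder
    connectorConfiguration: ["tasks.max": "4"],
    currentVersion: "1"
)

// Poll the new operation resource until it leaves a transient state.
if let operationArn = update.connectorOperationArn {
    var state: KafkaConnect.ConnectorOperationState?
    repeat {
        try await Task.sleep(for: .seconds(15))
        let op = try await kafkaConnect.describeConnectorOperation(connectorOperationArn: operationArn)
        state = op.connectorOperationState
    } while state == .pending || state == .updateInProgress
    print("operation settled in state: \(String(describing: state))")
}
try await client.shutdown()
```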
" + } } } }, @@ -3987,6 +4473,17 @@ "smithy.api#documentation": "

Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

" } }, + "com.amazonaws.kafkaconnect#WorkerSetting": { + "type": "structure", + "members": { + "capacity": { + "target": "com.amazonaws.kafkaconnect#CapacityDescription" + } + }, + "traits": { + "smithy.api#documentation": "

Details about the worker settings of a connector.

" + } + }, "com.amazonaws.kafkaconnect#__boolean": { "type": "boolean", "traits": { @@ -3999,16 +4496,6 @@ "smithy.api#default": 0 } }, - "com.amazonaws.kafkaconnect#__integerMin1Max10": { - "type": "integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#range": { - "min": 1, - "max": 10 - } - } - }, "com.amazonaws.kafkaconnect#__integerMin1Max100": { "type": "integer", "traits": { @@ -4029,6 +4516,18 @@ } } }, + "com.amazonaws.kafkaconnect#__listOfConnectorOperationStep": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#ConnectorOperationStep" + } + }, + "com.amazonaws.kafkaconnect#__listOfConnectorOperationSummary": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#ConnectorOperationSummary" + } + }, "com.amazonaws.kafkaconnect#__listOfConnectorSummary": { "type": "list", "member": { @@ -4087,18 +4586,6 @@ "smithy.api#sensitive": {} } }, - "com.amazonaws.kafkaconnect#__sensitive__mapOf__string": { - "type": "map", - "key": { - "target": "com.amazonaws.kafkaconnect#__string" - }, - "value": { - "target": "com.amazonaws.kafkaconnect#__string" - }, - "traits": { - "smithy.api#sensitive": {} - } - }, "com.amazonaws.kafkaconnect#__string": { "type": "string" }, diff --git a/models/macie2.json b/models/macie2.json index 5ba0b2f3b5..f4ca58a157 100644 --- a/models/macie2.json +++ b/models/macie2.json @@ -417,7 +417,7 @@ "apiServiceName": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "

The URL of the Amazon Web Service that provides the operation, for example: s3.amazonaws.com.

", + "smithy.api#documentation": "

The URL of the Amazon Web Services service that provides the operation, for example: s3.amazonaws.com.

", "smithy.api#jsonName": "apiServiceName" } }, @@ -966,7 +966,7 @@ "unknown": { "target": "com.amazonaws.macie2#__long", "traits": { - "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate permissions settings for. Macie can't determine whether these buckets are publicly accessible.

", + "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate permissions settings for. For example, the buckets' policies or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether the buckets are publicly accessible.

", "smithy.api#jsonName": "unknown" } } @@ -1002,7 +1002,7 @@ "unknown": { "target": "com.amazonaws.macie2#__long", "traits": { - "smithy.api#documentation": "

The total number of buckets that Amazon Macie doesn't have current encryption metadata for. Macie can't provide current data about the default encryption settings for these buckets.

", + "smithy.api#documentation": "

The total number of buckets that Amazon Macie doesn't have current encryption metadata for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the default encryption settings for the buckets.

", "smithy.api#jsonName": "unknown" } } @@ -1038,7 +1038,7 @@ "unknown": { "target": "com.amazonaws.macie2#__long", "traits": { - "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate shared access settings for. Macie can't determine whether these buckets are shared with other Amazon Web Services accounts, Amazon CloudFront OAIs, or CloudFront OACs.

", + "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate shared access settings for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether the buckets are shared with other Amazon Web Services accounts, Amazon CloudFront OAIs, or CloudFront OACs.

", "smithy.api#jsonName": "unknown" } } @@ -1067,7 +1067,7 @@ "unknown": { "target": "com.amazonaws.macie2#__long", "traits": { - "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate server-side encryption requirements for. Macie can't determine whether the bucket policies for these buckets require server-side encryption of new objects.

", + "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate server-side encryption requirements for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether bucket policies for the buckets require server-side encryption of new objects.

", "smithy.api#jsonName": "unknown" } } @@ -1236,14 +1236,14 @@ "errorCode": { "target": "com.amazonaws.macie2#BucketMetadataErrorCode", "traits": { - "smithy.api#documentation": "

The error code for an error that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. If this value is ACCESS_DENIED, Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. If this value is null, Macie was able to retrieve and process the information.

", + "smithy.api#documentation": "

The code for an error or issue that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. Possible values are:

  • ACCESS_DENIED - Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request.

  • BUCKET_COUNT_EXCEEDS_QUOTA - Retrieving and processing the information would exceed the quota for the number of buckets that Macie monitors for an account (10,000).

If this value is null, Macie was able to retrieve and process the information.

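The new BUCKET_COUNT_EXCEEDS_QUOTA code means a non-null errorCode is no longer synonymous with a permissions problem, so callers should branch on the code rather than assume ACCESS_DENIED. A hedged sketch against a regenerated Soto client (case names assumed from the enum values):

```swift
import SotoMacie2

let client = AWSClient()
let macie = Macie2(client: client, region: .useast1)

let page = try await macie.describeBuckets(.init())
for bucket in page.buckets ?? [] {
    let name = bucket.bucketName ?? "unknown"
    switch bucket.errorCode {
    case .some(.accessDenied):
        print("\(name): Macie lacks permission to read the bucket's metadata")
    case .some(.bucketCountExceedsQuota):
        print("\(name): skipped; over the 10,000-bucket monitoring quota")
    default:
        print("\(name): evaluated; sensitivity score \(bucket.sensitivityScore ?? 0)")
    }
}
try await client.shutdown()
```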
", "smithy.api#jsonName": "errorCode" } }, "errorMessage": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "

A brief description of the error (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information.

", + "smithy.api#documentation": "

A brief description of the error or issue (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information.

", "smithy.api#jsonName": "errorMessage" } }, @@ -1257,7 +1257,7 @@ "lastAutomatedDiscoveryTime": { "target": "com.amazonaws.macie2#__timestampIso8601", "traits": { - "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account.

", + "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if this analysis hasn't occurred.

", "smithy.api#jsonName": "lastAutomatedDiscoveryTime" } }, @@ -1306,7 +1306,7 @@ "sensitivityScore": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses.

", + "smithy.api#documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it's been disabled for your organization or standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it's been excluded from recent analyses.

", "smithy.api#jsonName": "sensitivityScore" } }, @@ -1368,7 +1368,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. By default, object count and storage size values include data for object parts that are the result of incomplete multipart uploads. For more information, see How Macie monitors Amazon S3 data security in the Amazon Macie User Guide.

If an error occurs when Macie attempts to retrieve and process metadata from Amazon S3 for the bucket or the bucket's objects, the value for the versioning property is false and the value for most other properties is null. Key exceptions are accountId, bucketArn, bucketCreatedAt, bucketName, lastUpdated, and region. To identify the cause of the error, refer to the errorCode and errorMessage values.

" + "smithy.api#documentation": "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. By default, object count and storage size values include data for object parts that are the result of incomplete multipart uploads. For more information, see How Macie monitors Amazon S3 data security in the Amazon Macie User Guide.

If an error or issue prevents Macie from retrieving and processing metadata from Amazon S3 for the bucket or the bucket's objects, the value for the versioning property is false and the value for most other properties is null or UNKNOWN. Key exceptions are accountId, bucketArn, bucketCreatedAt, bucketName, lastUpdated, and region. To identify the cause, refer to the errorCode and errorMessage values.

" } }, "com.amazonaws.macie2#BucketMetadataErrorCode": { @@ -1379,10 +1379,16 @@ "traits": { "smithy.api#enumValue": "ACCESS_DENIED" } + }, + "BUCKET_COUNT_EXCEEDS_QUOTA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BUCKET_COUNT_EXCEEDS_QUOTA" + } } }, "traits": { - "smithy.api#documentation": "

The error code for an error that prevented Amazon Macie from retrieving and processing information about an S3 bucket and the bucket's objects.

" + "smithy.api#documentation": "

The code for an error or issue that prevented Amazon Macie from retrieving and processing information about an S3 bucket and the bucket's objects.

" } }, "com.amazonaws.macie2#BucketPermissionConfiguration": { @@ -1528,7 +1534,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides aggregated statistical data for sensitive data discovery metrics that apply to S3 buckets, grouped by bucket sensitivity score (sensitivityScore). If automated sensitive data discovery is currently disabled for your account, the value for each metric is 0.

" + "smithy.api#documentation": "

Provides aggregated statistical data for sensitive data discovery metrics that apply to S3 buckets, grouped by bucket sensitivity score (sensitivityScore). If automated sensitive data discovery is currently disabled for your account, the value for most of these metrics is 0.

" } }, "com.amazonaws.macie2#Cell": { @@ -3752,7 +3758,7 @@ "suppressed": { "target": "com.amazonaws.macie2#__boolean", "traits": { - "smithy.api#documentation": "

Specifies whether occurrences of this type of sensitive data are excluded (true) or included (false) in the bucket's sensitivity score.

", + "smithy.api#documentation": "

Specifies whether occurrences of this type of sensitive data are excluded (true) or included (false) in the bucket's sensitivity score, if the score is calculated by Amazon Macie.

", "smithy.api#jsonName": "suppressed" } }, @@ -4718,7 +4724,7 @@ } }, "traits": { - "smithy.api#documentation": "

The type of finding. For details about each type, see Types of Amazon Macie findings in the Amazon Macie User Guide. Possible values are:

" + "smithy.api#documentation": "

The type of finding. For details about each type, see Types of findings in the Amazon Macie User Guide. Possible values are:

" } }, "com.amazonaws.macie2#FindingsFilterAction": { @@ -5158,7 +5164,7 @@ "bucketStatisticsBySensitivity": { "target": "com.amazonaws.macie2#BucketStatisticsBySensitivity", "traits": { - "smithy.api#documentation": "

The aggregated sensitive data discovery statistics for the buckets. If automated sensitive data discovery is currently disabled for your account, the value for each statistic is 0.

", + "smithy.api#documentation": "

The aggregated sensitive data discovery statistics for the buckets. If automated sensitive data discovery is currently disabled for your account, the value for most statistics is 0.

", "smithy.api#jsonName": "bucketStatisticsBySensitivity" } }, @@ -6481,7 +6487,7 @@ "reasons": { "target": "com.amazonaws.macie2#__listOfUnavailabilityReasonCode", "traits": { - "smithy.api#documentation": "

Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with an KMS key that's currently disabled.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

This value is null if sensitive data can be retrieved for the finding.

", + "smithy.api#documentation": "

Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with an KMS key that isn’t available. For example, the key is disabled, is scheduled for deletion, or was deleted.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

This value is null if sensitive data can be retrieved for the finding.

", "smithy.api#jsonName": "reasons" } } @@ -7894,7 +7900,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves a subset of information about all the custom data identifiers for an account.

", + "smithy.api#documentation": "

Retrieves a subset of information about the custom data identifiers for an account.

", "smithy.api#http": { "method": "POST", "uri": "/custom-data-identifiers/list", @@ -10247,14 +10253,14 @@ "errorCode": { "target": "com.amazonaws.macie2#BucketMetadataErrorCode", "traits": { - "smithy.api#documentation": "

The error code for an error that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. If this value is ACCESS_DENIED, Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. If this value is null, Macie was able to retrieve and process the information.

", + "smithy.api#documentation": "

The code for an error or issue that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. Possible values are:

  • ACCESS_DENIED - Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request.

  • BUCKET_COUNT_EXCEEDS_QUOTA - Retrieving and processing the information would exceed the quota for the number of buckets that Macie monitors for an account (10,000).

If this value is null, Macie was able to retrieve and process the information.

", "smithy.api#jsonName": "errorCode" } }, "errorMessage": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "

A brief description of the error (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information.

", + "smithy.api#documentation": "

A brief description of the error or issue (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information.

", "smithy.api#jsonName": "errorMessage" } }, @@ -10268,7 +10274,7 @@ "lastAutomatedDiscoveryTime": { "target": "com.amazonaws.macie2#__timestampIso8601", "traits": { - "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account.

", + "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if this analysis hasn't occurred.

", "smithy.api#jsonName": "lastAutomatedDiscoveryTime" } }, @@ -10289,7 +10295,7 @@ "sensitivityScore": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses.

", + "smithy.api#documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it's been disabled for your organization or standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it's been excluded from recent analyses.

", "smithy.api#jsonName": "sensitivityScore" } }, @@ -10323,7 +10329,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. By default, object count and storage size values include data for object parts that are the result of incomplete multipart uploads. For more information, see How Macie monitors Amazon S3 data security in the Amazon Macie User Guide.

If an error occurs when Macie attempts to retrieve and process information about the bucket or the bucket's objects, the value for most of these properties is null. Key exceptions are accountId and bucketName. To identify the cause of the error, refer to the errorCode and errorMessage values.

" + "smithy.api#documentation": "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. By default, object count and storage size values include data for object parts that are the result of incomplete multipart uploads. For more information, see How Macie monitors Amazon S3 data security in the Amazon Macie User Guide.

If an error or issue prevents Macie from retrieving and processing information about the bucket or the bucket's objects, the value for many of these properties is null. Key exceptions are accountId and bucketName. To identify the cause, refer to the errorCode and errorMessage values.

" } }, "com.amazonaws.macie2#MatchingResource": { @@ -10332,7 +10338,7 @@ "matchingBucket": { "target": "com.amazonaws.macie2#MatchingBucket", "traits": { - "smithy.api#documentation": "

The details of an S3 bucket that Amazon Macie monitors and analyzes.

", + "smithy.api#documentation": "

The details of an S3 bucket that Amazon Macie monitors and analyzes for your account.

", "smithy.api#jsonName": "matchingBucket" } } @@ -11288,7 +11294,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides information about the S3 bucket that a finding applies to.

" + "smithy.api#documentation": "

Provides information about the S3 bucket that a finding applies to. If a quota prevented Amazon Macie from retrieving and processing all the bucket's information prior to generating the finding, the following values are UNKNOWN or null: allowsUnencryptedObjectUploads, defaultServerSideEncryption, publicAccess, and tags.

" } }, "com.amazonaws.macie2#S3BucketCriteriaForJob": { @@ -11409,7 +11415,7 @@ "target": "com.amazonaws.macie2#__listOfS3BucketName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Depending on the value specified for the update operation (ClassificationScopeUpdateOperation), an array of strings that: lists the names of buckets to add or remove from the list, or specifies a new set of bucket names that overwrites all existing names in the list. Each string must be the full name of an S3 bucket. Values are case sensitive.

", + "smithy.api#documentation": "

Depending on the value specified for the update operation (ClassificationScopeUpdateOperation), an array of strings that: lists the names of buckets to add or remove from the list, or specifies a new set of bucket names that overwrites all existing names in the list. Each string must be the full name of an existing S3 bucket. Values are case sensitive.
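
A sketch of driving this update from Soto, assuming the `S3ClassificationScopeUpdate` and `S3ClassificationScopeExclusionUpdate` shapes from this model are generated as shown (the scope ID and bucket names are hypothetical placeholders; `operation: .add` appends to the existing list rather than replacing it):

```swift
import SotoMacie2

// A sketch: add two buckets to the classification scope's exclusion list.
func excludeBuckets(macie: Macie2, scopeId: String) async throws {
    _ = try await macie.updateClassificationScope(
        id: scopeId,
        s3: S3ClassificationScopeUpdate(
            excludes: S3ClassificationScopeExclusionUpdate(
                bucketNames: ["amzn-s3-demo-bucket1", "amzn-s3-demo-bucket2"],
                operation: .add
            )
        )
    )
}
```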

", "smithy.api#jsonName": "bucketNames", "smithy.api#required": {} } @@ -11711,7 +11717,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes.

", + "smithy.api#documentation": "

Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes for an account.
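
A minimal Soto sketch of this query, assuming the generated `searchResources` API returns `MatchingResource` entries wrapping the `MatchingBucket` details described below:

```swift
import SotoMacie2

// A sketch: query Macie's resource inventory and print matching buckets.
func listMatchingBuckets(macie: Macie2) async throws {
    let response = try await macie.searchResources(.init(maxResults: 25))
    for resource in response.matchingResources ?? [] {
        if let bucket = resource.matchingBucket {
            print("\(bucket.accountId ?? "?")/\(bucket.bucketName ?? "?")")
        }
    }
}
```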

", "smithy.api#http": { "method": "POST", "uri": "/datasources/search-resources", @@ -12050,7 +12056,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies configuration settings that determine which findings are published to Security Hub automatically. For information about how Macie publishes findings to Security Hub, see Amazon Macie integration with Security Hub in the Amazon Macie User Guide.

" + "smithy.api#documentation": "

Specifies configuration settings that determine which findings are published to Security Hub automatically. For information about how Macie publishes findings to Security Hub, see Evaluating findings with Security Hub in the Amazon Macie User Guide.
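
A hedged Soto sketch of applying this configuration (assuming the generated `putFindingsPublicationConfiguration` convenience method and the two boolean members of `SecurityHubConfiguration`):

```swift
import SotoMacie2

// A sketch: publish policy findings to Security Hub but keep
// sensitive data (classification) findings out of it.
func configureSecurityHubPublication(macie: Macie2) async throws {
    _ = try await macie.putFindingsPublicationConfiguration(
        securityHubConfiguration: SecurityHubConfiguration(
            publishClassificationFindings: false,
            publishPolicyFindings: true
        )
    )
}
```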

" } }, "com.amazonaws.macie2#SensitiveData": { @@ -12168,7 +12174,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides aggregated statistical data for sensitive data discovery metrics that apply to S3 buckets. Each field contains aggregated data for all the buckets that have a sensitivity score (sensitivityScore) of a specified value or within a specified range (BucketStatisticsBySensitivity). If automated sensitive data discovery is currently disabled for your account, the value for each field is 0.

" + "smithy.api#documentation": "

Provides aggregated statistical data for sensitive data discovery metrics that apply to S3 buckets. Each field contains aggregated data for all the buckets that have a sensitivity score (sensitivityScore) of a specified value or within a specified range (BucketStatisticsBySensitivity). If automated sensitive data discovery is currently disabled for your account, the value for most fields is 0.

" } }, "com.amazonaws.macie2#SensitivityInspectionTemplateExcludes": { @@ -12712,7 +12718,7 @@ "id": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "

The unique identifier for the custom data identifier or managed data identifier that detected the type of sensitive data to exclude or include in the score.

", + "smithy.api#documentation": "

The unique identifier for the custom data identifier or managed data identifier that detected the type of sensitive data to exclude from the score.

", "smithy.api#jsonName": "id" } }, @@ -12725,7 +12731,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies a custom data identifier or managed data identifier that detected a type of sensitive data to start excluding or including in an S3 bucket's sensitivity score.

" + "smithy.api#documentation": "

Specifies a custom data identifier or managed data identifier that detected a type of sensitive data to exclude from an S3 bucket's sensitivity score.

" } }, "com.amazonaws.macie2#TagCriterionForJob": { @@ -13172,7 +13178,7 @@ "target": "com.amazonaws.macie2#__string", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The type of error that occurred and prevented Amazon Macie from retrieving occurrences of sensitive data reported by the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with a KMS key that's currently disabled.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

", + "smithy.api#documentation": "

The type of error that occurred and prevented Amazon Macie from retrieving occurrences of sensitive data reported by the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with a KMS key that isn’t available. For example, the key is disabled, is scheduled for deletion, or was deleted.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

", "smithy.api#jsonName": "message", "smithy.api#required": {} } @@ -13972,7 +13978,7 @@ "suppressDataIdentifiers": { "target": "com.amazonaws.macie2#__listOfSuppressDataIdentifier", "traits": { - "smithy.api#documentation": "

An array of objects, one for each custom data identifier or managed data identifier that detected the type of sensitive data to start excluding or including in the bucket's score. To start including all sensitive data types in the score, don't specify any values for this array.

", + "smithy.api#documentation": "

An array of objects, one for each custom data identifier or managed data identifier that detected a type of sensitive data to exclude from the bucket's score. To include all sensitive data types in the score, don't specify any values for this array.
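
A Soto sketch of suppressing one identifier's detections, assuming the generated `updateResourceProfileDetections` method and `SuppressDataIdentifier` shape (the bucket ARN and identifier ID are hypothetical placeholders):

```swift
import SotoMacie2

// A sketch: exclude one managed data identifier's detections from a
// bucket's sensitivity score.
func suppressDetections(macie: Macie2) async throws {
    _ = try await macie.updateResourceProfileDetections(
        resourceArn: "arn:aws:s3:::amzn-s3-demo-bucket",
        suppressDataIdentifiers: [
            SuppressDataIdentifier(id: "us-passport-number", type: .managed)
        ]
    )
}
```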

", "smithy.api#jsonName": "suppressDataIdentifiers" } } @@ -14040,7 +14046,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the access method and settings to use when retrieving occurrences of sensitive data reported by findings. If your request specifies an Identity and Access Management (IAM) role to assume, Amazon Macie verifies that the role exists and the attached policies are configured correctly. If there's an issue, Macie returns an error. For information about addressing the issue, see Configuration options and requirements for retrieving sensitive data samples in the Amazon Macie User Guide.

" + "smithy.api#documentation": "

Specifies the access method and settings to use when retrieving occurrences of sensitive data reported by findings. If your request specifies an Identity and Access Management (IAM) role to assume, Amazon Macie verifies that the role exists and the attached policies are configured correctly. If there's an issue, Macie returns an error. For information about addressing the issue, see Configuration options for retrieving sensitive data samples in the Amazon Macie User Guide.
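
A sketch of configuring the assume-role retrieval mode via Soto, assuming the `RevealConfiguration` and `UpdateRetrievalConfiguration` shapes are generated as in this model (the key alias and role name are hypothetical placeholders):

```swift
import SotoMacie2

// A sketch: enable sample retrieval and have Macie assume a dedicated
// IAM role instead of using the caller's credentials.
func enableReveal(macie: Macie2) async throws {
    _ = try await macie.updateRevealConfiguration(
        configuration: RevealConfiguration(
            kmsKeyId: "alias/macie-reveal-key",
            status: .enabled
        ),
        retrievalConfiguration: UpdateRetrievalConfiguration(
            retrievalMode: .assumeRole,
            roleName: "MacieRevealRole"
        )
    )
}
```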

" } }, "com.amazonaws.macie2#UpdateRevealConfiguration": { @@ -14516,7 +14522,7 @@ "awsService": { "target": "com.amazonaws.macie2#AwsService", "traits": { - "smithy.api#documentation": "

If the action was performed by an Amazon Web Services account that belongs to an Amazon Web Service, the name of the service.

", + "smithy.api#documentation": "

If the action was performed by an Amazon Web Services account that belongs to an Amazon Web Services service, the name of the service.

", "smithy.api#jsonName": "awsService" } }, diff --git a/models/mediaconnect.json b/models/mediaconnect.json index 191555a3fb..9a326c4c08 100644 --- a/models/mediaconnect.json +++ b/models/mediaconnect.json @@ -1033,6 +1033,21 @@ } } }, + "com.amazonaws.mediaconnect#AudioMonitoringSetting": { + "type": "structure", + "members": { + "SilentAudio": { + "target": "com.amazonaws.mediaconnect#SilentAudio", + "traits": { + "smithy.api#documentation": "Detects periods of silence.", + "smithy.api#jsonName": "silentAudio" + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies the configuration for audio stream metrics monitoring." + } + }, "com.amazonaws.mediaconnect#BadRequestException": { "type": "structure", "members": { @@ -1052,6 +1067,28 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.mediaconnect#BlackFrames": { + "type": "structure", + "members": { + "State": { + "target": "com.amazonaws.mediaconnect#State", + "traits": { + "smithy.api#documentation": "Indicates whether the BlackFrames metric is enabled or disabled.", + "smithy.api#jsonName": "state" + } + }, + "ThresholdSeconds": { + "target": "com.amazonaws.mediaconnect#__integer", + "traits": { + "smithy.api#documentation": "Specifies the number of consecutive seconds of black frames that triggers an event or alert.", + "smithy.api#jsonName": "thresholdSeconds" + } + } + }, + "traits": { + "smithy.api#documentation": "Configures settings for the BlackFrames metric." + } + }, "com.amazonaws.mediaconnect#Bridge": { "type": "structure", "members": { @@ -1546,6 +1583,23 @@ } } }, + "com.amazonaws.mediaconnect#ContentQualityAnalysisState": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.mediaconnect#CreateBridge": { "type": "operation", "input": { @@ -3691,6 +3745,28 @@ "smithy.api#documentation": "The frame resolution used by the video stream." } }, + "com.amazonaws.mediaconnect#FrozenFrames": { + "type": "structure", + "members": { + "State": { + "target": "com.amazonaws.mediaconnect#State", + "traits": { + "smithy.api#documentation": "Indicates whether the FrozenFrames metric is enabled or disabled.", + "smithy.api#jsonName": "state" + } + }, + "ThresholdSeconds": { + "target": "com.amazonaws.mediaconnect#__integer", + "traits": { + "smithy.api#documentation": "Specifies the number of consecutive seconds of a static image that triggers an event or alert.", + "smithy.api#jsonName": "thresholdSeconds" + } + } + }, + "traits": { + "smithy.api#documentation": "Configures settings for the FrozenFrames metric." 
+ } + }, "com.amazonaws.mediaconnect#Gateway": { "type": "structure", "members": { @@ -6600,6 +6676,27 @@ "smithy.api#documentation": "The state of thumbnail monitoring.", "smithy.api#jsonName": "thumbnailState" } + }, + "AudioMonitoringSettings": { + "target": "com.amazonaws.mediaconnect#__listOfAudioMonitoringSetting", + "traits": { + "smithy.api#documentation": "Contains the settings for audio stream metrics monitoring.", + "smithy.api#jsonName": "audioMonitoringSettings" + } + }, + "ContentQualityAnalysisState": { + "target": "com.amazonaws.mediaconnect#ContentQualityAnalysisState", + "traits": { + "smithy.api#documentation": "Indicates whether content quality analysis is enabled or disabled.", + "smithy.api#jsonName": "contentQualityAnalysisState" + } + }, + "VideoMonitoringSettings": { + "target": "com.amazonaws.mediaconnect#__listOfVideoMonitoringSetting", + "traits": { + "smithy.api#documentation": "Contains the settings for video stream metrics monitoring.", + "smithy.api#jsonName": "videoMonitoringSettings" + } } }, "traits": { @@ -8067,6 +8164,28 @@ "smithy.api#documentation": "The settings for the source of the flow." } }, + "com.amazonaws.mediaconnect#SilentAudio": { + "type": "structure", + "members": { + "State": { + "target": "com.amazonaws.mediaconnect#State", + "traits": { + "smithy.api#documentation": "Indicates whether the SilentAudio metric is enabled or disabled.", + "smithy.api#jsonName": "state" + } + }, + "ThresholdSeconds": { + "target": "com.amazonaws.mediaconnect#__integer", + "traits": { + "smithy.api#documentation": "Specifies the number of consecutive seconds of silence that triggers an event or alert.", + "smithy.api#jsonName": "thresholdSeconds" + } + } + }, + "traits": { + "smithy.api#documentation": "Configures settings for the SilentAudio metric." + } + }, "com.amazonaws.mediaconnect#Source": { "type": "structure", "members": { @@ -10404,6 +10523,28 @@ "smithy.api#documentation": "Update maintenance setting for a flow" } }, + "com.amazonaws.mediaconnect#VideoMonitoringSetting": { + "type": "structure", + "members": { + "BlackFrames": { + "target": "com.amazonaws.mediaconnect#BlackFrames", + "traits": { + "smithy.api#documentation": "Detects video frames that are black.", + "smithy.api#jsonName": "blackFrames" + } + }, + "FrozenFrames": { + "target": "com.amazonaws.mediaconnect#FrozenFrames", + "traits": { + "smithy.api#documentation": "Detects video frames that have not changed.", + "smithy.api#jsonName": "frozenFrames" + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies the configuration for video stream metrics monitoring." 
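
Since these monitoring shapes are additive, wiring them into a flow's source monitoring configuration from Soto could look like the following sketch. It assumes `SotoMediaConnect` exposes the new `MonitoringConfig` members shown in this model; the flow ARN and threshold values are hypothetical:

```swift
import SotoMediaConnect

// A sketch: enable content quality analysis plus silence, black-frame,
// and frozen-frame detection on an existing flow.
func enableQualityMonitoring(mediaConnect: MediaConnect, flowArn: String) async throws {
    let monitoring = MonitoringConfig(
        audioMonitoringSettings: [
            AudioMonitoringSetting(silentAudio: SilentAudio(state: .enabled, thresholdSeconds: 30))
        ],
        contentQualityAnalysisState: .enabled,
        videoMonitoringSettings: [
            VideoMonitoringSetting(
                blackFrames: BlackFrames(state: .enabled, thresholdSeconds: 10),
                frozenFrames: FrozenFrames(state: .enabled, thresholdSeconds: 10)
            )
        ]
    )
    _ = try await mediaConnect.updateFlow(flowArn: flowArn, sourceMonitoringConfig: monitoring)
}
```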
+ } + }, "com.amazonaws.mediaconnect#VpcInterface": { "type": "structure", "members": { @@ -10565,6 +10706,12 @@ "target": "com.amazonaws.mediaconnect#AddOutputRequest" } }, + "com.amazonaws.mediaconnect#__listOfAudioMonitoringSetting": { + "type": "list", + "member": { + "target": "com.amazonaws.mediaconnect#AudioMonitoringSetting" + } + }, "com.amazonaws.mediaconnect#__listOfBridgeOutput": { "type": "list", "member": { @@ -10727,6 +10874,12 @@ "target": "com.amazonaws.mediaconnect#TransportStreamProgram" } }, + "com.amazonaws.mediaconnect#__listOfVideoMonitoringSetting": { + "type": "list", + "member": { + "target": "com.amazonaws.mediaconnect#VideoMonitoringSetting" + } + }, "com.amazonaws.mediaconnect#__listOfVpcInterface": { "type": "list", "member": { diff --git a/models/mediaconvert.json b/models/mediaconvert.json index 9ca11269d6..94aeefd4e6 100644 --- a/models/mediaconvert.json +++ b/models/mediaconvert.json @@ -1415,7 +1415,7 @@ } }, "AudioSourceName": { - "target": "com.amazonaws.mediaconvert#__string", + "target": "com.amazonaws.mediaconvert#__stringMax2048", "traits": { "smithy.api#documentation": "Specifies which audio data to use from each input. In the simplest case, specify an \"Audio Selector\":#inputs-audio_selector by name based on its order within each input. For example if you specify \"Audio Selector 3\", then the third audio selector will be used from each input. If an input does not have an \"Audio Selector 3\", then the audio selector marked as \"default\" in that input will be used. If there is no audio selector marked as \"default\", silence will be inserted for the duration of that input. Alternatively, an \"Audio Selector Group\":#inputs-audio_selector_group name may be specified, with similar default/silence behavior. If no audio_source_name is specified, then \"Audio Selector 1\" will be chosen automatically.", "smithy.api#jsonName": "audioSourceName" @@ -2820,6 +2820,13 @@ "smithy.api#jsonName": "outlineSize" } }, + "RemoveRubyReserveAttributes": { + "target": "com.amazonaws.mediaconvert#RemoveRubyReserveAttributes", + "traits": { + "smithy.api#documentation": "Optionally remove any tts:rubyReserve attributes present in your input, that do not have a tts:ruby attribute in the same element, from your output. Use if your vertical Japanese output captions have alignment issues. To remove ruby reserve attributes when present: Choose Enabled. To not remove any ruby reserve attributes: Keep the default value, Disabled.", + "smithy.api#jsonName": "removeRubyReserveAttributes" + } + }, "ShadowColor": { "target": "com.amazonaws.mediaconvert#BurninSubtitleShadowColor", "traits": { @@ -7052,7 +7059,7 @@ } }, "traits": { - "smithy.api#documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled." + "smithy.api#documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion or Timecode track is enabled." 
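
Combining the drop-frame behavior with the new timecode track setting, a small Soto fragment (assuming the generated `VideoDescription` memberwise initializer; all other members keep their defaults and this would be embedded in a full job's settings):

```swift
import SotoMediaConvert

// A sketch: request an NMHD timecode track in an MP4 output and allow
// drop-frame timecode for 29.97 fps material.
let videoDescription = VideoDescription(
    dropFrameTimecode: .enabled,
    timecodeInsertion: .picTimingSei,
    timecodeTrack: .enabled
)
```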
} }, "com.amazonaws.mediaconvert#DvbNitSettings": { @@ -10456,6 +10463,13 @@ "smithy.api#documentation": "Inserts timecode for each frame as 4 bytes of an unregistered SEI message.", "smithy.api#jsonName": "unregisteredSeiTimecode" } + }, + "WriteMp4PackagingType": { + "target": "com.amazonaws.mediaconvert#H264WriteMp4PackagingType", + "traits": { + "smithy.api#documentation": "Specify how SPS and PPS NAL units are written in your output MP4 container, according to ISO/IEC 14496-15. If the location of these parameters doesn't matter in your workflow: Keep the default value, AVC1. MediaConvert writes SPS and PPS NAL units in the sample description ('stsd') box (but not into samples directly). To write SPS and PPS NAL units directly into samples (but not in the 'stsd' box): Choose AVC3. When you do, note that your output might not play properly with some downstream systems or players.", + "smithy.api#jsonName": "writeMp4PackagingType" + } } }, "traits": { @@ -10588,6 +10602,26 @@ "smithy.api#documentation": "Inserts timecode for each frame as 4 bytes of an unregistered SEI message." } }, + "com.amazonaws.mediaconvert#H264WriteMp4PackagingType": { + "type": "enum", + "members": { + "AVC1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AVC1" + } + }, + "AVC3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AVC3" + } + } + }, + "traits": { + "smithy.api#documentation": "Specify how SPS and PPS NAL units are written in your output MP4 container, according to ISO/IEC 14496-15. If the location of these parameters doesn't matter in your workflow: Keep the default value, AVC1. MediaConvert writes SPS and PPS NAL units in the sample description ('stsd') box (but not into samples directly). To write SPS and PPS NAL units directly into samples (but not in the 'stsd' box): Choose AVC3. When you do, note that your output might not play properly with some downstream systems or players." + } + }, "com.amazonaws.mediaconvert#H265AdaptiveQuantization": { "type": "enum", "members": { @@ -13046,7 +13080,7 @@ } }, "FileInput": { - "target": "com.amazonaws.mediaconvert#__stringPatternS3Https", + "target": "com.amazonaws.mediaconvert#__stringMax2048PatternS3Https", "traits": { "smithy.api#documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL.", "smithy.api#jsonName": "fileInput" @@ -14410,7 +14444,7 @@ } }, "CredentialsSecretName": { - "target": "com.amazonaws.mediaconvert#__stringMin1Max2048PatternArnAwsAwsUsGovAwsCnSecretsmanagerUsGovApCaCnEuSaCentralNorthSouthEastWestDD12SecretAZAZ09", + "target": "com.amazonaws.mediaconvert#__stringMin1Max2048PatternArnAZSecretsmanagerWD12SecretAZAZ09", "traits": { "smithy.api#documentation": "Provide the name of the AWS Secrets Manager secret where your Kantar credentials are stored. Note that your MediaConvert service role must provide access to this secret. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/granting-permissions-for-mediaconvert-to-access-secrets-manager-secret.html. 
For instructions on creating a secret, see https://docs.aws.amazon.com/secretsmanager/latest/userguide/tutorials_basic.html, in the AWS Secrets Manager User Guide.", "smithy.api#jsonName": "credentialsSecretName" @@ -20538,7 +20572,7 @@ } }, "Extension": { - "target": "com.amazonaws.mediaconvert#__string", + "target": "com.amazonaws.mediaconvert#__stringMax256", "traits": { "smithy.api#documentation": "Use Extension to specify the file extension for outputs in File output groups. If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, webm * No Container, the service will use codec extensions (e.g. AAC, H265, H265, AC3)", "smithy.api#jsonName": "extension" @@ -20639,7 +20673,7 @@ } }, "Name": { - "target": "com.amazonaws.mediaconvert#__string", + "target": "com.amazonaws.mediaconvert#__stringMax2048", "traits": { "smithy.api#documentation": "Name of the output group", "smithy.api#jsonName": "name" @@ -21779,6 +21813,26 @@ "smithy.api#documentation": "Use Manual audio remixing to adjust audio levels for each audio channel in each output of your job. With audio remixing, you can output more or fewer audio channels than your input audio source provides." } }, + "com.amazonaws.mediaconvert#RemoveRubyReserveAttributes": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + } + }, + "traits": { + "smithy.api#documentation": "Optionally remove any tts:rubyReserve attributes present in your input, that do not have a tts:ruby attribute in the same element, from your output. Use if your vertical Japanese output captions have alignment issues. To remove ruby reserve attributes when present: Choose Enabled. To not remove any ruby reserve attributes: Keep the default value, Disabled." + } + }, "com.amazonaws.mediaconvert#RenewalType": { "type": "enum", "members": { @@ -23027,6 +23081,26 @@ "smithy.api#documentation": "Use Source to set how timecodes are handled within this job. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded - Use the timecode that is in the input video. If no embedded timecode is in the source, the service will use Start at 0 instead. * Start at 0 - Set the timecode of the initial frame to 00:00:00:00. * Specified Start - Set the timecode of the initial frame to a value other than zero. You use Start timecode to provide this value." } }, + "com.amazonaws.mediaconvert#TimecodeTrack": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + } + }, + "traits": { + "smithy.api#documentation": "To include a timecode track in your MP4 output: Choose Enabled. MediaConvert writes the timecode track in the Null Media Header box (NMHD), without any timecode text formatting information. You can also specify dropframe or non-dropframe timecode under the Drop Frame Timecode setting. 
To not include a timecode track: Keep the default value, Disabled." + } + }, "com.amazonaws.mediaconvert#TimedMetadata": { "type": "enum", "members": { @@ -24262,7 +24336,7 @@ "DropFrameTimecode": { "target": "com.amazonaws.mediaconvert#DropFrameTimecode", "traits": { - "smithy.api#documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled.", + "smithy.api#documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion or Timecode track is enabled.", "smithy.api#jsonName": "dropFrameTimecode" } }, @@ -24315,6 +24389,13 @@ "smithy.api#jsonName": "timecodeInsertion" } }, + "TimecodeTrack": { + "target": "com.amazonaws.mediaconvert#TimecodeTrack", + "traits": { + "smithy.api#documentation": "To include a timecode track in your MP4 output: Choose Enabled. MediaConvert writes the timecode track in the Null Media Header box (NMHD), without any timecode text formatting information. You can also specify dropframe or non-dropframe timecode under the Drop Frame Timecode setting. To not include a timecode track: Keep the default value, Disabled.", + "smithy.api#jsonName": "timecodeTrack" + } + }, "VideoPreprocessors": { "target": "com.amazonaws.mediaconvert#VideoPreprocessor", "traits": { @@ -27478,6 +27559,34 @@ } } }, + "com.amazonaws.mediaconvert#__stringMax2048": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + } + } + }, + "com.amazonaws.mediaconvert#__stringMax2048PatternS3Https": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^s3://([^\\/]+\\/+)+((([^\\/]*)))|^https?://[^\\/].*[^&]$" + } + }, + "com.amazonaws.mediaconvert#__stringMax256": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, "com.amazonaws.mediaconvert#__stringMin0": { "type": "string", "traits": { @@ -27586,14 +27695,14 @@ } } }, - "com.amazonaws.mediaconvert#__stringMin1Max2048PatternArnAwsAwsUsGovAwsCnSecretsmanagerUsGovApCaCnEuSaCentralNorthSouthEastWestDD12SecretAZAZ09": { + "com.amazonaws.mediaconvert#__stringMin1Max2048PatternArnAZSecretsmanagerWD12SecretAZAZ09": { "type": "string", "traits": { "smithy.api#length": { "min": 1, "max": 2048 }, - "smithy.api#pattern": "^(arn:(aws|aws-us-gov|aws-cn):secretsmanager:(us(-gov)?|ap|ca|cn|eu|sa)-(central|(north|south)?(east|west)?)-\\d:\\d{12}:secret:)?[a-zA-Z0-9_\\/_+=.@-]*$" + "smithy.api#pattern": "^(arn:[a-z-]+:secretsmanager:[\\w-]+:\\d{12}:secret:)?[a-zA-Z0-9_\\/_+=.@-]*$" } }, "com.amazonaws.mediaconvert#__stringMin1Max256": { diff --git a/models/medialive.json b/models/medialive.json index 4796ffa365..f0438c1b2b 100644 --- a/models/medialive.json +++ b/models/medialive.json @@ -3555,6 +3555,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { 
@@ -3596,6 +3603,43 @@ "smithy.api#documentation": "Placeholder documentation for ChannelEgressEndpoint" } }, + "com.amazonaws.medialive#ChannelEngineVersionRequest": { + "type": "structure", + "members": { + "Version": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "The build identifier of the engine version to use for this channel. Specify 'DEFAULT' to reset to the default version.", + "smithy.api#jsonName": "version" + } + } + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for ChannelEngineVersionRequest" + } + }, + "com.amazonaws.medialive#ChannelEngineVersionResponse": { + "type": "structure", + "members": { + "ExpirationDate": { + "target": "com.amazonaws.medialive#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The UTC time when the version expires.", + "smithy.api#jsonName": "expirationDate" + } + }, + "Version": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "The build identifier for this version of the channel version.", + "smithy.api#jsonName": "version" + } + } + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for ChannelEngineVersionResponse" + } + }, "com.amazonaws.medialive#ChannelPipelineIdToRestart": { "type": "enum", "members": { @@ -3860,6 +3904,20 @@ "smithy.api#documentation": "AnywhereSettings settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "The engine version that you requested for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } + }, + "UsedChannelEngineVersions": { + "target": "com.amazonaws.medialive#__listOfChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "The engine version that the running pipelines are using.", + "smithy.api#jsonName": "usedChannelEngineVersions" + } } }, "traits": { @@ -4432,6 +4490,26 @@ "smithy.api#documentation": "Used in CreateClusterSummary, DescribeClusterSummary, DescribeClusterResult, UpdateClusterResult." } }, + "com.amazonaws.medialive#CmafId3Behavior": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + } + }, + "traits": { + "smithy.api#documentation": "Cmaf Id3 Behavior" + } + }, "com.amazonaws.medialive#CmafIngestGroupSettings": { "type": "structure", "members": { @@ -4506,6 +4584,20 @@ "smithy.api#documentation": "Change the modifier that MediaLive automatically adds to the Streams() name for a SCTE 35 track. The default is \"scte\", which means the default name will be Streams(scte.cmfm). Any string you enter here will replace the \"scte\" string.\\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters.", "smithy.api#jsonName": "scte35NameModifier" } + }, + "Id3Behavior": { + "target": "com.amazonaws.medialive#CmafId3Behavior", + "traits": { + "smithy.api#documentation": "Set to ENABLED to enable ID3 metadata insertion. 
To include metadata, you configure other parameters in the output group, or you add an ID3 action to the channel schedule.", + "smithy.api#jsonName": "id3Behavior" + } + }, + "Id3NameModifier": { + "target": "com.amazonaws.medialive#__stringMax100", + "traits": { + "smithy.api#documentation": "Change the modifier that MediaLive automatically adds to the Streams() name that identifies an ID3 track. The default is \"id3\", which means the default name will be Streams(id3.cmfm). Any string you enter here will replace the \"id3\" string.\\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters.", + "smithy.api#jsonName": "id3NameModifier" + } } }, "traits": { @@ -5002,6 +5094,19 @@ "smithy.api#documentation": "The Elemental Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionRequest", + "traits": { + "smithy.api#documentation": "The desired engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } + }, + "DryRun": { + "target": "com.amazonaws.medialive#__boolean", + "traits": { + "smithy.api#jsonName": "dryRun" + } } }, "traits": { @@ -7553,6 +7658,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -9615,6 +9727,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -18550,6 +18669,28 @@ "smithy.api#documentation": "When set to \"standard\", an I-Frame only playlist will be written out for each video output in the output group. This I-Frame only playlist will contain byte range offsets pointing to the I-frame(s) in each segment." } }, + "com.amazonaws.medialive#Id3SegmentTaggingScheduleActionSettings": { + "type": "structure", + "members": { + "Id3": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "Complete this parameter if you want to specify the entire ID3 metadata. Enter a base64 string that contains one or more fully formed ID3 tags, according to the ID3 specification: http://id3.org/id3v2.4.0-structure", + "smithy.api#jsonName": "id3" + } + }, + "Tag": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "Complete this parameter if you want to specify only the metadata, not the entire frame. MediaLive will insert the metadata in a TXXX frame. Enter the value as plain text. You can include standard MediaLive variable data such as the current segment number.", + "smithy.api#jsonName": "tag" + } + } + }, + "traits": { + "smithy.api#documentation": "Settings for the action to insert ID3 metadata in every segment, in applicable output groups." 
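
A hedged Soto sketch of scheduling this per-segment ID3 action, assuming the generated `SotoMediaLive` shapes mirror this model (the channel ID and tag text are hypothetical placeholders):

```swift
import SotoMediaLive

// A sketch: schedule an immediate action that tags every segment with an
// ID3 TXXX frame carrying the given plain-text value.
func tagSegments(mediaLive: MediaLive, channelId: String) async throws {
    let action = ScheduleAction(
        actionName: "id3-segment-tagging",
        scheduleActionSettings: ScheduleActionSettings(
            id3SegmentTaggingSettings: Id3SegmentTaggingScheduleActionSettings(tag: "my-segment-tag")
        ),
        scheduleActionStartSettings: ScheduleActionStartSettings(
            immediateModeScheduleActionStartSettings: ImmediateModeScheduleActionStartSettings()
        )
    )
    _ = try await mediaLive.batchUpdateSchedule(
        channelId: channelId,
        creates: BatchScheduleActionCreateRequest(scheduleActions: [action])
    )
}
```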
+ } + }, "com.amazonaws.medialive#ImmediateModeScheduleActionStartSettings": { "type": "structure", "members": {}, @@ -22786,6 +22927,73 @@ "smithy.api#output": {} } }, + "com.amazonaws.medialive#ListVersions": { + "type": "operation", + "input": { + "target": "com.amazonaws.medialive#ListVersionsRequest" + }, + "output": { + "target": "com.amazonaws.medialive#ListVersionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.medialive#BadGatewayException" + }, + { + "target": "com.amazonaws.medialive#BadRequestException" + }, + { + "target": "com.amazonaws.medialive#ConflictException" + }, + { + "target": "com.amazonaws.medialive#ForbiddenException" + }, + { + "target": "com.amazonaws.medialive#GatewayTimeoutException" + }, + { + "target": "com.amazonaws.medialive#InternalServerErrorException" + }, + { + "target": "com.amazonaws.medialive#NotFoundException" + }, + { + "target": "com.amazonaws.medialive#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieves an array of all the encoder engine versions that are available in this AWS account.", + "smithy.api#http": { + "method": "GET", + "uri": "/prod/versions", + "code": 200 + } + } + }, + "com.amazonaws.medialive#ListVersionsRequest": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "Placeholder documentation for ListVersionsRequest", + "smithy.api#input": {} + } + }, + "com.amazonaws.medialive#ListVersionsResponse": { + "type": "structure", + "members": { + "Versions": { + "target": "com.amazonaws.medialive#__listOfChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "List of engine versions that are available for this AWS account.", + "smithy.api#jsonName": "versions" + } + } + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for ListVersionsResponse", + "smithy.api#output": {} + } + }, "com.amazonaws.medialive#LogLevel": { "type": "enum", "members": { @@ -24237,6 +24445,9 @@ { "target": "com.amazonaws.medialive#ListTagsForResource" }, + { + "target": "com.amazonaws.medialive#ListVersions" + }, { "target": "com.amazonaws.medialive#PurchaseOffering" }, @@ -27971,6 +28182,13 @@ "smithy.api#documentation": "Pipeline ID", "smithy.api#jsonName": "pipelineId" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Current engine version of the encoder for this pipeline.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -29089,6 +29307,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -29525,6 +29750,20 @@ "smithy.api#documentation": "Action to deactivate a static image overlay in one or more specified outputs", "smithy.api#jsonName": "staticImageOutputDeactivateSettings" } + }, + "Id3SegmentTaggingSettings": { + "target": "com.amazonaws.medialive#Id3SegmentTaggingScheduleActionSettings", + "traits": { + "smithy.api#documentation": "Action to insert ID3 metadata in every segment, in applicable output groups", + "smithy.api#jsonName": "id3SegmentTaggingSettings" + } + }, + "TimedMetadataSettings": { + "target": "com.amazonaws.medialive#TimedMetadataScheduleActionSettings", + "traits": { 
+ "smithy.api#documentation": "Action to insert ID3 metadata once, in applicable output groups", + "smithy.api#jsonName": "timedMetadataSettings" + } } }, "traits": { @@ -31135,6 +31374,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -32451,6 +32697,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -33215,6 +33468,23 @@ "smithy.api#documentation": "Timecode Config Source" } }, + "com.amazonaws.medialive#TimedMetadataScheduleActionSettings": { + "type": "structure", + "members": { + "Id3": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "Enter a base64 string that contains one or more fully formed ID3 tags.See the ID3 specification: http://id3.org/id3v2.4.0-structure", + "smithy.api#jsonName": "id3", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Settings for the action to insert ID3 metadata (as a one-time action) in applicable output groups." + } + }, "com.amazonaws.medialive#TooManyRequestsException": { "type": "structure", "members": { @@ -33942,6 +34212,19 @@ "smithy.api#documentation": "An optional Amazon Resource Name (ARN) of the role to assume when running the Channel. If you do not specify this on an update call but the role was previously set that role will be removed.", "smithy.api#jsonName": "roleArn" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionRequest", + "traits": { + "smithy.api#documentation": "Channel engine version for this channel", + "smithy.api#jsonName": "channelEngineVersion" + } + }, + "DryRun": { + "target": "com.amazonaws.medialive#__boolean", + "traits": { + "smithy.api#jsonName": "dryRun" + } } }, "traits": { @@ -37169,6 +37452,15 @@ "smithy.api#documentation": "Placeholder documentation for __listOfChannelEgressEndpoint" } }, + "com.amazonaws.medialive#__listOfChannelEngineVersionResponse": { + "type": "list", + "member": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse" + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for __listOfChannelEngineVersionResponse" + } + }, "com.amazonaws.medialive#__listOfChannelPipelineIdToRestart": { "type": "list", "member": { diff --git a/models/mwaa.json b/models/mwaa.json index 161364d670..99d50643a8 100644 --- a/models/mwaa.json +++ b/models/mwaa.json @@ -1136,7 +1136,7 @@ "AirflowVersion": { "target": "com.amazonaws.mwaa#AirflowVersion", "traits": { - "smithy.api#documentation": "

The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version.\n For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA).

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, and 2.10.1.

" + "smithy.api#documentation": "

The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version.\n For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA).

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, 2.10.1, and 2.10.3.

" } }, "LoggingConfiguration": { @@ -1443,7 +1443,7 @@ "AirflowVersion": { "target": "com.amazonaws.mwaa#AirflowVersion", "traits": { - "smithy.api#documentation": "

The Apache Airflow version on your environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, and 2.10.1.

" + "smithy.api#documentation": "

The Apache Airflow version on your environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, 2.10.1, and 2.10.3.

" } }, "SourceBucketArn": { @@ -2989,7 +2989,7 @@ "AirflowVersion": { "target": "com.amazonaws.mwaa#AirflowVersion", "traits": { - "smithy.api#documentation": "

The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

\n

Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating\n your resources, see Upgrading an Amazon MWAA environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, and 2.10.1.

" + "smithy.api#documentation": "

The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

\n

Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating\n your resources, see Upgrading an Amazon MWAA environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, 2.10.1, and 2.10.3.
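
A Soto sketch of requesting the upgrade, assuming the generated `updateEnvironment` convenience method (the environment name is a hypothetical placeholder; per the note above, verify that your requirements, DAGs, and plugins are compatible first):

```swift
import SotoMWAA

// A sketch: upgrade an existing environment to the newly supported
// Airflow 2.10.3.
func upgradeEnvironment(mwaa: MWAA) async throws {
    _ = try await mwaa.updateEnvironment(
        airflowVersion: "2.10.3",
        name: "my-mwaa-environment"
    )
}
```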

" } }, "SourceBucketArn": { diff --git a/models/network-firewall.json b/models/network-firewall.json index 7d936974e9..05d71ac87a 100644 --- a/models/network-firewall.json +++ b/models/network-firewall.json @@ -3374,7 +3374,7 @@ "name": "network-firewall" }, "aws.protocols#awsJson1_0": {}, - "smithy.api#documentation": "

This is the API Reference for Network Firewall. This guide is for developers who need\n detailed information about the Network Firewall API actions, data types, and errors.

\n
  • The REST API requires you to handle connection details, such as calculating\n signatures, handling request retries, and error handling. For general information\n about using the Amazon Web Services REST APIs, see Amazon Web Services APIs.\n
    To access Network Firewall using the REST API endpoint:\n https://network-firewall..amazonaws.com \n

  • Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to\n the programming language or platform that you're using. For more information, see\n Amazon Web Services SDKs.

  • For descriptions of Network Firewall features, including step-by-step\n instructions on how to use them through the Network Firewall console, see the Network Firewall Developer\n Guide.
\n

Network Firewall is a stateful, managed, network firewall and intrusion detection and\n prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the\n perimeter of your VPC. This includes filtering traffic going to and coming from an internet\n gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible\n with Suricata, a free, open source network analysis and threat detection engine.

\n

You can use Network Firewall to monitor and protect your VPC traffic in a number of ways.\n The following are just a few examples:

\n
  • Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and\n block all other forms of traffic.

  • Use custom lists of known bad domains to limit the types of domain names that your\n applications can access.

  • Perform deep packet inspection on traffic entering or leaving your VPC.

  • Use stateful protocol detection to filter protocols like HTTPS, regardless of the\n port used.
\n

To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in\n Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

\n

To start using Network Firewall, do the following:

\n
  1. (Optional) If you don't already have a VPC that you want to protect, create it in\n Amazon VPC.

  2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a\n subnet for the sole use of Network Firewall.

  3. In Network Firewall, create stateless and stateful rule groups,\n to define the components of the network traffic filtering behavior that you want your firewall to have.

  4. In Network Firewall, create a firewall policy that uses your rule groups and\n specifies additional default traffic filtering behavior.

  5. In Network Firewall, create a firewall and specify your new firewall policy and\n VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you\n specify, with the behavior that's defined in the firewall policy.

  6. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall\n endpoints.
", + "smithy.api#documentation": "

This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors.

The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs.

To view the complete list of Amazon Web Services Regions where Network Firewall is available, see Service endpoints and quotas in the Amazon Web Services General Reference.

To access Network Firewall using the IPv4 REST API endpoint: https://network-firewall.<region>.amazonaws.com

To access Network Firewall using the Dualstack (IPv4 and IPv6) REST API endpoint: https://network-firewall.<region>.api.aws

Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

For descriptions of Network Firewall features, including step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide.

Network Firewall is a stateful, managed network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine.

You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples:

  • Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic.
  • Use custom lists of known bad domains to limit the types of domain names that your applications can access.
  • Perform deep packet inspection on traffic entering or leaving your VPC.
  • Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used.

To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see the Amazon VPC User Guide.

To start using Network Firewall, do the following:

  1. (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC.
  2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall.
  3. In Network Firewall, create stateless and stateful rule groups to define the components of the network traffic filtering behavior that you want your firewall to have.
  4. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior.
  5. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy.
  6. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints.
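
The six setup steps above map onto plain SDK calls. The following is a minimal sketch of steps 4 and 5 (firewall policy, then firewall) using the Soto client generated for this service; the VPC and subnet IDs are placeholders, and the parameter labels assume Soto's generated convenience methods, which mirror the model's member names. Run it inside an async context.

```swift
import SotoNetworkFirewall

let client = AWSClient()
defer { try? client.syncShutdown() }
let firewall = NetworkFirewall(client: client, region: .useast1)

// Step 4: a firewall policy that forwards all stateless traffic to the
// stateful engine; rule group references are omitted for brevity.
let policy = try await firewall.createFirewallPolicy(
    firewallPolicy: .init(
        statelessDefaultActions: ["aws:forward_to_sfe"],
        statelessFragmentDefaultActions: ["aws:forward_to_sfe"]
    ),
    firewallPolicyName: "example-policy"
)

// Step 5: a firewall that places an endpoint in each listed subnet.
// The subnet and VPC IDs below are placeholders.
_ = try await firewall.createFirewall(
    firewallName: "example-firewall",
    firewallPolicyArn: policy.firewallPolicyResponse.firewallPolicyArn,
    subnetMappings: [.init(subnetId: "subnet-0123456789abcdef0")],
    vpcId: "vpc-0123456789abcdef0"
)
```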
", "smithy.api#title": "AWS Network Firewall", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/notifications.json b/models/notifications.json index 5614b4b6ab..9857e2e027 100644 --- a/models/notifications.json +++ b/models/notifications.json @@ -17,12 +17,82 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.notifications#AccessStatus": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING" + } + } + } + }, + "com.amazonaws.notifications#AccountContactType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "ACCOUNT_PRIMARY", + "value": "ACCOUNT_PRIMARY", + "documentation": " Primary Contact managed by AWS Account Management Service.\n" + }, + { + "name": "ACCOUNT_ALTERNATE_BILLING", + "value": "ACCOUNT_ALTERNATE_BILLING", + "documentation": " Alternate Billing Contact managed by AWS Account Management Service.\n" + }, + { + "name": "ACCOUNT_ALTERNATE_OPERATIONS", + "value": "ACCOUNT_ALTERNATE_OPERATIONS", + "documentation": " Alternate Operations Contact managed by AWS Account Management Service.\n" + }, + { + "name": "ACCOUNT_ALTERNATE_SECURITY", + "value": "ACCOUNT_ALTERNATE_SECURITY", + "documentation": " Alternate Security Contact managed by AWS Account Management Service.\n" + } + ] + } + }, "com.amazonaws.notifications#AccountId": { "type": "string", "traits": { "smithy.api#pattern": "^\\d{12}$" } }, + "com.amazonaws.notifications#AggregatedNotificationRegions": { + "type": "list", + "member": { + "target": "com.amazonaws.notifications#Region" + } + }, + "com.amazonaws.notifications#AggregationDetail": { + "type": "structure", + "members": { + "summarizationDimensions": { + "target": "com.amazonaws.notifications#SummarizationDimensionDetails", + "traits": { + "smithy.api#documentation": "

Properties used to summarize aggregated events.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides detailed information about the dimensions used for aggregation.

" + } + }, "com.amazonaws.notifications#AggregationDuration": { "type": "string", "traits": { @@ -64,6 +134,82 @@ ] } }, + "com.amazonaws.notifications#AggregationKey": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Indicates the type of aggregation key.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Indicates the value associated with the aggregation key name.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Key-value collection that indicates how notifications are grouped.

" + } + }, + "com.amazonaws.notifications#AggregationKeys": { + "type": "list", + "member": { + "target": "com.amazonaws.notifications#AggregationKey" + } + }, + "com.amazonaws.notifications#AggregationSummary": { + "type": "structure", + "members": { + "eventCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Indicates the number of events associated with the aggregation key.

", + "smithy.api#required": {} + } + }, + "aggregatedBy": { + "target": "com.amazonaws.notifications#AggregationKeys", + "traits": { + "smithy.api#documentation": "

Indicates the criteria or rules by which notifications have been grouped together.

", + "smithy.api#required": {} + } + }, + "aggregatedAccounts": { + "target": "com.amazonaws.notifications#SummarizationDimensionOverview", + "traits": { + "smithy.api#documentation": "

Indicates the Amazon Web Services accounts in the aggregation key.

", + "smithy.api#required": {} + } + }, + "aggregatedRegions": { + "target": "com.amazonaws.notifications#SummarizationDimensionOverview", + "traits": { + "smithy.api#documentation": "

Indicates the Amazon Web Services Regions in the aggregation key.

", + "smithy.api#required": {} + } + }, + "aggregatedOrganizationalUnits": { + "target": "com.amazonaws.notifications#SummarizationDimensionOverview", + "traits": { + "smithy.api#documentation": "

Indicates the collection of organizational units that are involved in the aggregation key.

" + } + }, + "additionalSummarizationDimensions": { + "target": "com.amazonaws.notifications#SummarizationDimensionOverviews", + "traits": { + "smithy.api#documentation": "

List of additional dimensions used to group and summarize data.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides additional information about the aggregation key.

" + } + }, "com.amazonaws.notifications#Arn": { "type": "string", "traits": { @@ -107,7 +253,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to associate a new Channel with a particular NotificationConfiguration" }, - "smithy.api#documentation": "

Associates a delivery Channel with a particular NotificationConfiguration. Supported Channels include AWS Chatbot, the AWS Console Mobile Application, and emails (notifications-contacts).

", + "smithy.api#documentation": "

Associates a delivery Channel with a particular NotificationConfiguration. Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts).
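
A minimal sketch of this call through the Soto client generated from this model; the module, type, and method names assume Soto's usual code generation, and both ARNs are illustrative placeholders.

```swift
import SotoNotifications

let client = AWSClient()
defer { try? client.syncShutdown() }
let notifications = Notifications(client: client, region: .useast1)

// Attach a Chatbot channel to an existing NotificationConfiguration.
_ = try await notifications.associateChannel(
    arn: "arn:aws:chatbot::123456789012:chat-configuration/slack-channel/example",
    notificationConfigurationArn: "arn:aws:notifications::123456789012:configuration/abc123EXAMPLE"
)
```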

", "smithy.api#http": { "code": 201, "method": "POST", @@ -122,7 +268,7 @@ "arn": { "target": "com.amazonaws.notifications#ChannelArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Channel to associate with the NotificationConfiguration.

Supported ARNs include AWS Chatbot, the Console Mobile Application, and notifications-contacts.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Channel to associate with the NotificationConfiguration.

Supported ARNs include Chatbot, the Console Mobile Application, and notifications-contacts.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -130,7 +276,7 @@ "notificationConfigurationArn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The ARN of the NotificationConfiguration to associate with the Channel.

", + "smithy.api#documentation": "

The ARN of the NotificationConfiguration to associate with the Channel.

", "smithy.api#required": {} } } @@ -146,6 +292,156 @@ "smithy.api#output": {} } }, + "com.amazonaws.notifications#AssociateManagedNotificationAccountContact": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#AssociateManagedNotificationAccountContactRequest" + }, + "output": { + "target": "com.amazonaws.notifications#AssociateManagedNotificationAccountContactResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#ConflictException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.notifications#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to associate an Account contact\n to a particular Managed Notification Configuration" + }, + "smithy.api#documentation": "

Associates an Account Contact with a particular ManagedNotificationConfiguration.
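
As a sketch, associating the account's primary contact could look like the following; it reuses the `notifications` client from the AssociateChannel sketch above, the enum case assumes Soto's usual camel-casing of ACCOUNT_PRIMARY, and the ARN is a placeholder you would normally obtain by listing managed configurations.

```swift
// The managed configuration ARN below is a hypothetical placeholder.
let managedConfigurationArn = "<managed-notification-configuration-arn>"
_ = try await notifications.associateManagedNotificationAccountContact(
    contactIdentifier: .accountPrimary,
    managedNotificationConfigurationArn: managedConfigurationArn
)
```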

", + "smithy.api#http": { + "code": 201, + "method": "PUT", + "uri": "/contacts/associate-managed-notification/{contactIdentifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.notifications#AssociateManagedNotificationAccountContactRequest": { + "type": "structure", + "members": { + "contactIdentifier": { + "target": "com.amazonaws.notifications#AccountContactType", + "traits": { + "smithy.api#documentation": "

A unique value of an Account Contact Type to associate with the ManagedNotificationConfiguration.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "managedNotificationConfigurationArn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to associate with the Account Contact.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#AssociateManagedNotificationAccountContactResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#AssociateManagedNotificationAdditionalChannel": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#AssociateManagedNotificationAdditionalChannelRequest" + }, + "output": { + "target": "com.amazonaws.notifications#AssociateManagedNotificationAdditionalChannelResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#ConflictException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.notifications#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to associate a Channel\n to a particular Managed Notification Configuration" + }, + "smithy.api#documentation": "

Associates an additional Channel with a particular ManagedNotificationConfiguration.

Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts).

", + "smithy.api#http": { + "code": 201, + "method": "PUT", + "uri": "/channels/associate-managed-notification/{channelArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.notifications#AssociateManagedNotificationAdditionalChannelRequest": { + "type": "structure", + "members": { + "channelArn": { + "target": "com.amazonaws.notifications#ChannelArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Channel to associate with the ManagedNotificationConfiguration.

Supported ARNs include Chatbot, the Console Mobile Application, and email (notifications-contacts).

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "managedNotificationConfigurationArn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to associate with the additional Channel.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#AssociateManagedNotificationAdditionalChannelResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.notifications#Channel": { "type": "resource", "identifiers": { @@ -176,6 +472,56 @@ "smithy.api#pattern": "^arn:aws:(chatbot|consoleapp|notifications-contacts):[a-zA-Z0-9-]*:[0-9]{12}:[a-zA-Z0-9-_.@]+/[a-zA-Z0-9/_.@:-]+$" } }, + "com.amazonaws.notifications#ChannelAssociationOverrideOption": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "ENABLED", + "value": "ENABLED", + "documentation": " AWS User Notification service users can associate or disassociate a Channel with a notification configuration.\n" + }, + { + "name": "DISABLED", + "value": "DISABLED", + "documentation": " AWS User Notification service users can not associate or disassociate a Channel with a notification configuration.\n" + } + ] + } + }, + "com.amazonaws.notifications#ChannelIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^ACCOUNT_PRIMARY|ACCOUNT_ALTERNATE_BILLING|ACCOUNT_ALTERNATE_OPERATIONS|ACCOUNT_ALTERNATE_SECURITY|arn:aws:(chatbot|consoleapp|notifications-contacts):[a-zA-Z0-9-]*:[0-9]{12}:[a-zA-Z0-9-_.@]+/[a-zA-Z0-9/_.@:-]+$" + } + }, + "com.amazonaws.notifications#ChannelType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "MOBILE", + "value": "MOBILE", + "documentation": " AWS Console Mobile App sends notifications to mobile devices. Link:https://aws.amazon.com/console/mobile/\n" + }, + { + "name": "CHATBOT", + "value": "CHATBOT", + "documentation": " Chatbot sends notifications to group platforms, like Slack or Chime. Link:https://aws.amazon.com/chatbot/\n" + }, + { + "name": "EMAIL", + "value": "EMAIL", + "documentation": " Email sends notifications to email addresses.\n" + }, + { + "name": "ACCOUNT_CONTACT", + "value": "ACCOUNT_CONTACT", + "documentation": " User Notification Service sends notifications to Account Managed contacts.\n" + } + ] + } + }, "com.amazonaws.notifications#Channels": { "type": "list", "member": { @@ -241,7 +587,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to create a new EventRule, associating it with a NotificationConfiguration" }, - "smithy.api#documentation": "

Creates an EventRule that is associated with a specified Notification Configuration.

", + "smithy.api#documentation": "

Creates an EventRule that is associated with a specified NotificationConfiguration.

", "smithy.api#http": { "code": 201, "method": "POST", @@ -256,34 +602,34 @@ "notificationConfigurationArn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration associated with this EventRule.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration associated with this EventRule.

", "smithy.api#required": {} } }, "source": { "target": "com.amazonaws.notifications#Source", "traits": { - "smithy.api#documentation": "

The matched event source.

Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

The matched event source.

Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.

", "smithy.api#required": {} } }, "eventType": { "target": "com.amazonaws.notifications#EventType", "traits": { - "smithy.api#documentation": "

The event type to match.

Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

The event type to match.

Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and Amazon CloudWatch Alarm State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.

", "smithy.api#required": {} } }, "eventPattern": { "target": "com.amazonaws.notifications#EventRuleEventPattern", "traits": { - "smithy.api#documentation": "

An additional event pattern used to further filter the events this EventRule receives.

For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

An additional event pattern used to further filter the events this EventRule receives.

For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.

" } }, "regions": { "target": "com.amazonaws.notifications#Regions", "traits": { - "smithy.api#documentation": "

A list of AWS Regions that send events to this EventRule.

", + "smithy.api#documentation": "

A list of Amazon Web Services Regions that send events to this EventRule.
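
A sketch of the call, reusing the `notifications` client from the AssociateChannel sketch above; the configuration ARN is a placeholder, and the source/eventType pair follows the EventBridge examples cited in the member docs.

```swift
let configurationArn = "arn:aws:notifications::123456789012:configuration/abc123EXAMPLE"  // placeholder

// Forward CloudWatch alarm state changes from two Regions, narrowed by an
// extra event pattern that only matches transitions into ALARM.
let rule = try await notifications.createEventRule(
    eventPattern: #"{"detail":{"state":{"value":["ALARM"]}}}"#,
    eventType: "CloudWatch Alarm State Change",
    notificationConfigurationArn: configurationArn,
    regions: ["us-east-1", "eu-west-1"],
    source: "aws.cloudwatch"
)
print(rule.statusSummaryByRegion)
```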

", "smithy.api#required": {} } } @@ -305,14 +651,14 @@ "notificationConfigurationArn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The ARN of a NotificationConfiguration.

", + "smithy.api#documentation": "

The ARN of a NotificationConfiguration.

", "smithy.api#required": {} } }, "statusSummaryByRegion": { "target": "com.amazonaws.notifications#StatusSummaryByRegion", "traits": { - "smithy.api#documentation": "

A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.

", + "smithy.api#documentation": "

A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.

", "smithy.api#required": {} } } @@ -354,7 +700,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to create a NotificationConfiguration" }, - "smithy.api#documentation": "

Creates a new NotificationConfiguration.

", + "smithy.api#documentation": "

Creates a new NotificationConfiguration.

", "smithy.api#http": { "code": 201, "method": "POST", @@ -369,21 +715,21 @@ "name": { "target": "com.amazonaws.notifications#NotificationConfigurationName", "traits": { - "smithy.api#documentation": "

The name of the NotificationConfiguration. Supports RFC 3986's unreserved characters.

", + "smithy.api#documentation": "

The name of the NotificationConfiguration. Supports RFC 3986's unreserved characters.

", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.notifications#NotificationConfigurationDescription", "traits": { - "smithy.api#documentation": "

The description of the NotificationConfiguration.

", + "smithy.api#documentation": "

The description of the NotificationConfiguration.

", "smithy.api#required": {} } }, "aggregationDuration": { "target": "com.amazonaws.notifications#AggregationDuration", "traits": { - "smithy.api#documentation": "

The aggregation preference of the NotificationConfiguration.

Values:

  • LONG: Aggregate notifications for long periods of time (12 hours).
  • SHORT: Aggregate notifications for short periods of time (5 minutes).
  • NONE: Don't aggregate notifications. No delay in delivery.
" + "smithy.api#documentation": "

The aggregation preference of the NotificationConfiguration.

Values:

  • LONG: Aggregate notifications for long periods of time (12 hours).
  • SHORT: Aggregate notifications for short periods of time (5 minutes).
  • NONE: Don't aggregate notifications.
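
Putting the request members above together, here is a sketch of creating a configuration with short aggregation; it reuses the `notifications` client from the AssociateChannel sketch above.

```swift
let config = try await notifications.createNotificationConfiguration(
    aggregationDuration: .short,  // 5-minute aggregation window
    description: "EC2 and CloudWatch events for the platform team",
    name: "platform-alerts"
)
// Per the response docs, status starts INACTIVE until EventRules activate.
print(config.arn, config.status)
```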
" } }, "tags": { @@ -403,14 +749,14 @@ "arn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the the resource.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration.

", "smithy.api#required": {} } }, "status": { "target": "com.amazonaws.notifications#NotificationConfigurationStatus", "traits": { - "smithy.api#documentation": "

The status of this NotificationConfiguration.

The status should always be INACTIVE when part of the CreateNotificationConfiguration response.

Values:

  • ACTIVE: All EventRules are ACTIVE and any call can be run.
  • PARTIALLY_ACTIVE: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.
  • INACTIVE: All EventRules are INACTIVE and any call can be run.
  • DELETING: This NotificationConfiguration is being deleted. Only GET and LIST calls can be run.
", + "smithy.api#documentation": "

The current status of this NotificationConfiguration.

", "smithy.api#required": {} } } @@ -458,7 +804,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to delete an EventRule" }, - "smithy.api#documentation": "

Deletes an EventRule.

", + "smithy.api#documentation": "

Deletes an EventRule.

", "smithy.api#http": { "code": 200, "method": "DELETE", @@ -473,7 +819,7 @@ "arn": { "target": "com.amazonaws.notifications#EventRuleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the EventRule to delete.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the EventRule to delete.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -523,7 +869,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to delete a NotificationConfiguration" }, - "smithy.api#documentation": "

Deletes a NotificationConfiguration.

", + "smithy.api#documentation": "

Deletes a NotificationConfiguration.

", "smithy.api#http": { "code": 200, "method": "DELETE", @@ -538,7 +884,7 @@ "arn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration to delete.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration to delete.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -588,7 +934,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to deregister a NotificationHub" }, - "smithy.api#documentation": "

Deregisters a NotificationHub in the specified Region.

You can't deregister the last NotificationHub in the account. NotificationEvents stored in the deregistered NotificationHub are no longer visible. Recreating a new NotificationHub in the same Region restores access to those NotificationEvents.
", + "smithy.api#documentation": "

Deregisters a NotificationConfiguration in the specified Region.

You can't deregister the last NotificationHub in the account. NotificationEvents stored in the deregistered NotificationConfiguration are no longer visible. Recreating a new NotificationConfiguration in the same Region restores access to those NotificationEvents.
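
A sketch of the call, reusing the `notifications` client from the AssociateChannel sketch above.

```swift
// Deregister the hub in eu-west-1; events already stored there stop being
// visible until a hub is recreated in that Region.
let result = try await notifications.deregisterNotificationHub(
    notificationHubRegion: "eu-west-1"
)
print(result.statusSummary)
```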
", "smithy.api#http": { "code": 200, "method": "DELETE", @@ -603,7 +949,7 @@ "notificationHubRegion": { "target": "com.amazonaws.notifications#Region", "traits": { - "smithy.api#documentation": "

The NotificationHub Region.

", + "smithy.api#documentation": "

The NotificationConfiguration Region.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -619,14 +965,14 @@ "notificationHubRegion": { "target": "com.amazonaws.notifications#Region", "traits": { - "smithy.api#documentation": "

The NotificationHub Region.

", + "smithy.api#documentation": "

The NotificationConfiguration Region.

", "smithy.api#required": {} } }, "statusSummary": { "target": "com.amazonaws.notifications#NotificationHubStatusSummary", "traits": { - "smithy.api#documentation": "

NotificationHub status information.

", + "smithy.api#documentation": "

NotificationConfiguration status information.

", "smithy.api#required": {} } } @@ -668,24 +1014,30 @@ } } }, - "com.amazonaws.notifications#DisassociateChannel": { + "com.amazonaws.notifications#DisableNotificationsAccessForOrganization": { "type": "operation", "input": { - "target": "com.amazonaws.notifications#DisassociateChannelRequest" + "target": "com.amazonaws.notifications#DisableNotificationsAccessForOrganizationRequest" }, "output": { - "target": "com.amazonaws.notifications#DisassociateChannelResponse" + "target": "com.amazonaws.notifications#DisableNotificationsAccessForOrganizationResponse" }, "errors": [ { "target": "com.amazonaws.notifications#AccessDeniedException" }, + { + "target": "com.amazonaws.notifications#ConflictException" + }, { "target": "com.amazonaws.notifications#InternalServerException" }, { "target": "com.amazonaws.notifications#ResourceNotFoundException" }, + { + "target": "com.amazonaws.notifications#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.notifications#ThrottlingException" }, @@ -696,13 +1048,69 @@ "traits": { "aws.api#controlPlane": {}, "aws.iam#iamAction": { - "documentation": "Grants permission to remove a Channel from a NotificationConfiguration" + "documentation": "Grants permission to disable Service Trust for AWS User Notifications", + "requiredActions": [ + "organizations:DisableAWSServiceAccess" + ] }, - "smithy.api#documentation": "

Disassociates a Channel from a specified NotificationConfiguration. Supported Channels include AWS Chatbot, the AWS Console Mobile Application, and emails (notifications-contacts).

", + "smithy.api#documentation": "

Disables service trust between User Notifications and Amazon Web Services Organizations.

", "smithy.api#http": { "code": 200, - "method": "POST", - "uri": "/channels/disassociate/{arn}" + "method": "DELETE", + "uri": "/organization/access" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.notifications#DisableNotificationsAccessForOrganizationRequest": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#DisableNotificationsAccessForOrganizationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#DisassociateChannel": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#DisassociateChannelRequest" + }, + "output": { + "target": "com.amazonaws.notifications#DisassociateChannelResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to remove a Channel from a NotificationConfiguration" + }, + "smithy.api#documentation": "

Disassociates a Channel from a specified NotificationConfiguration. Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts).

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/channels/disassociate/{arn}" }, "smithy.api#idempotent": {} } @@ -721,7 +1129,7 @@ "notificationConfigurationArn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The ARN of the NotificationConfiguration to disassociate.

", + "smithy.api#documentation": "

The ARN of the NotificationConfiguration to disassociate.

", "smithy.api#required": {} } } @@ -737,6 +1145,210 @@ "smithy.api#output": {} } }, + "com.amazonaws.notifications#DisassociateManagedNotificationAccountContact": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#DisassociateManagedNotificationAccountContactRequest" + }, + "output": { + "target": "com.amazonaws.notifications#DisassociateManagedNotificationAccountContactResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#ConflictException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to remove an Account contact from a Managed Notification Configuration" + }, + "smithy.api#documentation": "

Disassociates an Account Contact from a particular ManagedNotificationConfiguration.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/contacts/disassociate-managed-notification/{contactIdentifier}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.notifications#DisassociateManagedNotificationAccountContactRequest": { + "type": "structure", + "members": { + "contactIdentifier": { + "target": "com.amazonaws.notifications#AccountContactType", + "traits": { + "smithy.api#documentation": "

The unique value of an Account Contact Type to associate with the ManagedNotificationConfiguration.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "managedNotificationConfigurationArn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to associate with the Account Contact.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#DisassociateManagedNotificationAccountContactResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#DisassociateManagedNotificationAdditionalChannel": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#DisassociateManagedNotificationAdditionalChannelRequest" + }, + "output": { + "target": "com.amazonaws.notifications#DisassociateManagedNotificationAdditionalChannelResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to remove a Channel from a Managed Notification Configuration" + }, + "smithy.api#documentation": "

Disassociates an additional Channel from a particular ManagedNotificationConfiguration.

Supported Channels include Chatbot, the Console Mobile Application, and emails (notifications-contacts).

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/channels/disassociate-managed-notification/{channelArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.notifications#DisassociateManagedNotificationAdditionalChannelRequest": { + "type": "structure", + "members": { + "channelArn": { + "target": "com.amazonaws.notifications#ChannelArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Channel to disassociate from the ManagedNotificationConfiguration.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "managedNotificationConfigurationArn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Managed Notification Configuration to disassociate from the additional Channel.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#DisassociateManagedNotificationAdditionalChannelResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#EnableNotificationsAccessForOrganization": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#EnableNotificationsAccessForOrganizationRequest" + }, + "output": { + "target": "com.amazonaws.notifications#EnableNotificationsAccessForOrganizationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#ConflictException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.notifications#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to enable Service Trust for AWS User Notifications", + "requiredActions": [ + "organizations:EnableAWSServiceAccess", + "iam:CreateServiceLinkedRole" + ] + }, + "smithy.api#documentation": "

Enables service trust between User Notifications and Amazon Web Services Organizations.
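
A sketch of toggling the trust relationship, reusing the `notifications` client from the AssociateChannel sketch above; note the organizations:EnableAWSServiceAccess / DisableAWSServiceAccess permissions (and iam:CreateServiceLinkedRole for enabling) listed in the traits.

```swift
// Both operations take an empty request, so the generated conveniences
// need no arguments.
_ = try await notifications.enableNotificationsAccessForOrganization()
// ...later, to sever the trust relationship:
_ = try await notifications.disableNotificationsAccessForOrganization()
```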

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/organization/access" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.notifications#EnableNotificationsAccessForOrganizationRequest": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#EnableNotificationsAccessForOrganizationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.notifications#ErrorMessage": { "type": "string" }, @@ -830,20 +1442,20 @@ "status": { "target": "com.amazonaws.notifications#EventRuleStatus", "traits": { - "smithy.api#documentation": "

The status of the EventRule.

Values:

  • ACTIVE: The EventRule can process events.
  • INACTIVE: The EventRule may be unable to process events.
  • CREATING: The EventRule is being created. Only GET and LIST calls can be run.
  • UPDATING: The EventRule is being updated. Only GET and LIST calls can be run.
  • DELETING: The EventRule is being deleted. Only GET and LIST calls can be run.
", + "smithy.api#documentation": "

The status of the EventRule.

Values:

  • ACTIVE: The EventRule can process events.
  • INACTIVE: The EventRule may be unable to process events.
  • CREATING: The EventRule is being created. Only GET and LIST calls can be run.
  • UPDATING: The EventRule is being updated. Only GET and LIST calls can be run.
  • DELETING: The EventRule is being deleted. Only GET and LIST calls can be run.
", "smithy.api#required": {} } }, "reason": { "target": "com.amazonaws.notifications#EventRuleStatusReason", "traits": { - "smithy.api#documentation": "

A human-readable reason for EventRuleStatus.

", + "smithy.api#documentation": "

A human-readable reason for EventRuleStatus.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Describes EventRule status information.

" + "smithy.api#documentation": "

Provides additional information about the current EventRule status.

" } }, "com.amazonaws.notifications#EventRuleStructure": { @@ -852,69 +1464,69 @@ "arn": { "target": "com.amazonaws.notifications#EventRuleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the EventRule. The CloudFormation stack generates this ARN and then uses it to associate with the NotificationConfiguration.

", "smithy.api#required": {} } }, "notificationConfigurationArn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The ARN for the NotificationConfiguration associated with this EventRule.

", + "smithy.api#documentation": "

The ARN for the NotificationConfiguration associated with this EventRule.

", "smithy.api#required": {} } }, "creationTime": { "target": "com.amazonaws.notifications#CreationTime", "traits": { - "smithy.api#documentation": "

The creation time of the resource.

", + "smithy.api#documentation": "

The creation time of the EventRule.

", "smithy.api#required": {} } }, "source": { "target": "com.amazonaws.notifications#Source", "traits": { - "smithy.api#documentation": "

The matched event source.

Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

The event source this rule should match against the EventBridge event sources. It must match at least one of the valid EventBridge event sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.

", "smithy.api#required": {} } }, "eventType": { "target": "com.amazonaws.notifications#EventType", "traits": { - "smithy.api#documentation": "

The event type to match.

Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

The event type this rule should match against the EventBridge events. It must match at least one of the valid EventBridge event types. For example, Amazon EC2 Instance State-change Notification and Amazon CloudWatch Alarm State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.

", "smithy.api#required": {} } }, "eventPattern": { "target": "com.amazonaws.notifications#EventRuleEventPattern", "traits": { - "smithy.api#documentation": "

An additional event pattern used to further filter the events this EventRule receives.

For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

An additional event pattern used to further filter the events this EventRule receives.

For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.

", "smithy.api#required": {} } }, "regions": { "target": "com.amazonaws.notifications#Regions", "traits": { - "smithy.api#documentation": "

A list of AWS Regions that send events to this EventRule.

", + "smithy.api#documentation": "

A list of Amazon Web Services Regions that send events to this EventRule.

", "smithy.api#required": {} } }, "managedRules": { "target": "com.amazonaws.notifications#ManagedRuleArns", "traits": { - "smithy.api#documentation": "

A list of Amazon EventBridge Managed Rule ARNs associated with this EventRule.

These are created by AWS User Notifications within your account so your EventRules can function.
", + "smithy.api#documentation": "

A list of Amazon EventBridge Managed Rule ARNs associated with this EventRule.

These are created by User Notifications within your account so your EventRules can function.
", "smithy.api#required": {} } }, "statusSummaryByRegion": { "target": "com.amazonaws.notifications#StatusSummaryByRegion", "traits": { - "smithy.api#documentation": "

A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.

", + "smithy.api#documentation": "

A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Contains a complete list of fields related to an EventRule.

" + "smithy.api#documentation": "

Contains a complete list of fields related to an EventRule.

" } }, "com.amazonaws.notifications#EventRules": { @@ -978,7 +1590,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to get an EventRule" }, - "smithy.api#documentation": "

Returns a specified EventRule.

", + "smithy.api#documentation": "

Returns a specified EventRule.
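
A sketch of fetching a rule and walking its per-Region status map, reusing the `notifications` client from the AssociateChannel sketch above; the ARN is a placeholder for the value returned by CreateEventRule.

```swift
let eventRule = try await notifications.getEventRule(
    arn: "<event-rule-arn>"  // hypothetical placeholder
)
for (region, summary) in eventRule.statusSummaryByRegion {
    print(region, summary.status, summary.reason)
}
```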

", "smithy.api#http": { "code": 200, "method": "GET", @@ -993,7 +1605,7 @@ "arn": { "target": "com.amazonaws.notifications#EventRuleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the EventRule to return.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the EventRule to return.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1016,56 +1628,56 @@ "notificationConfigurationArn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The ARN of a NotificationConfiguration.

", + "smithy.api#documentation": "

The ARN of a NotificationConfiguration.

", "smithy.api#required": {} } }, "creationTime": { "target": "com.amazonaws.notifications#CreationTime", "traits": { - "smithy.api#documentation": "

The date when the EventRule was created.

", + "smithy.api#documentation": "

The date when the EventRule was created.

", "smithy.api#required": {} } }, "source": { "target": "com.amazonaws.notifications#Source", "traits": { - "smithy.api#documentation": "

The matched event source.

Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

The matched event source.

Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.

", "smithy.api#required": {} } }, "eventType": { "target": "com.amazonaws.notifications#EventType", "traits": { - "smithy.api#documentation": "

The event type to match.

Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

The event type to match.

Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and Amazon CloudWatch Alarm State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.

", "smithy.api#required": {} } }, "eventPattern": { "target": "com.amazonaws.notifications#EventRuleEventPattern", "traits": { - "smithy.api#documentation": "

An additional event pattern used to further filter the events this EventRule receives.

For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

An additional event pattern used to further filter the events this EventRule receives.

For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.

", "smithy.api#required": {} } }, "regions": { "target": "com.amazonaws.notifications#Regions", "traits": { - "smithy.api#documentation": "

A list of AWS Regions that send events to this EventRule.

", + "smithy.api#documentation": "

A list of Amazon Web Services Regions that send events to this EventRule.

", "smithy.api#required": {} } }, "managedRules": { "target": "com.amazonaws.notifications#ManagedRuleArns", "traits": { - "smithy.api#documentation": "

A list of managed rules from EventBridge that are are associated with this EventRule.

These are created by AWS User Notifications within your account so this EventRule functions.
", + "smithy.api#documentation": "

A list of managed rules from EventBridge that are associated with this EventRule.

These are created by User Notifications within your account so this EventRule functions.
", "smithy.api#required": {} } }, "statusSummaryByRegion": { "target": "com.amazonaws.notifications#StatusSummaryByRegion", "traits": { - "smithy.api#documentation": "

A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.

", + "smithy.api#documentation": "

A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.

", "smithy.api#required": {} } } @@ -1074,13 +1686,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.notifications#GetNotificationConfiguration": { + "com.amazonaws.notifications#GetManagedNotificationChildEvent": { "type": "operation", "input": { - "target": "com.amazonaws.notifications#GetNotificationConfigurationRequest" + "target": "com.amazonaws.notifications#GetManagedNotificationChildEventRequest" }, "output": { - "target": "com.amazonaws.notifications#GetNotificationConfigurationResponse" + "target": "com.amazonaws.notifications#GetManagedNotificationChildEventResponse" }, "errors": [ { @@ -1102,75 +1714,69 @@ "traits": { "aws.api#controlPlane": {}, "aws.iam#iamAction": { - "documentation": "Grants permission to get a NotificationConfiguration" + "documentation": "Grants permission to get a Managed Notification Child Event" }, - "smithy.api#documentation": "

Returns a specified NotificationConfiguration.

", + "smithy.api#documentation": "

Returns the child event of a given ManagedNotificationEvent.

", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/notification-configurations/{arn}" + "uri": "/managed-notification-child-events/{arn}" }, "smithy.api#readonly": {} } }, - "com.amazonaws.notifications#GetNotificationConfigurationRequest": { + "com.amazonaws.notifications#GetManagedNotificationChildEventRequest": { "type": "structure", "members": { "arn": { - "target": "com.amazonaws.notifications#NotificationConfigurationArn", + "target": "com.amazonaws.notifications#ManagedNotificationChildEventArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration to return.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationChildEvent to return.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "locale": { + "target": "com.amazonaws.notifications#LocaleCode", + "traits": { + "smithy.api#documentation": "

The locale code of the language used for the retrieved ManagedNotificationChildEvent. The default locale is English en_US.
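
A sketch of fetching a localized child event, reusing the `notifications` client from the AssociateChannel sketch above; the ARN is a placeholder, and the locale case assumes Soto's usual casing for the ja_JP enum value.

```swift
let childEvent = try await notifications.getManagedNotificationChildEvent(
    arn: "<managed-notification-child-event-arn>",  // hypothetical placeholder
    locale: .jaJp
)
print(childEvent.content)
```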

", + "smithy.api#httpQuery": "locale" + } } }, "traits": { "smithy.api#input": {} } }, - "com.amazonaws.notifications#GetNotificationConfigurationResponse": { + "com.amazonaws.notifications#GetManagedNotificationChildEventResponse": { "type": "structure", "members": { "arn": { - "target": "com.amazonaws.notifications#NotificationConfigurationArn", + "target": "com.amazonaws.notifications#ManagedNotificationChildEventArn", "traits": { "smithy.api#documentation": "

The ARN of the resource.

", "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.notifications#NotificationConfigurationName", - "traits": { - "smithy.api#documentation": "

The name of the NotificationConfiguration.

", - "smithy.api#required": {} - } - }, - "description": { - "target": "com.amazonaws.notifications#NotificationConfigurationDescription", - "traits": { - "smithy.api#documentation": "

The description of the NotificationConfiguration.

", - "smithy.api#required": {} - } - }, - "status": { - "target": "com.amazonaws.notifications#NotificationConfigurationStatus", + "managedNotificationConfigurationArn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", "traits": { - "smithy.api#documentation": "

The status of this NotificationConfiguration.

The status should always be INACTIVE when part of the CreateNotificationConfiguration response.

Values:

  • ACTIVE: All EventRules are ACTIVE and any call can be run.
  • PARTIALLY_ACTIVE: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.
  • INACTIVE: All EventRules are INACTIVE and any call can be run.
  • DELETING: This NotificationConfiguration is being deleted. Only GET and LIST calls can be run.
", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration associated with the ManagedNotificationChildEvent.

", "smithy.api#required": {} } }, "creationTime": { "target": "com.amazonaws.notifications#CreationTime", "traits": { - "smithy.api#documentation": "

The creation time of the NotificationConfiguration.

", + "smithy.api#documentation": "

The creation time of the ManagedNotificationChildEvent.

", "smithy.api#required": {} } }, - "aggregationDuration": { - "target": "com.amazonaws.notifications#AggregationDuration", + "content": { + "target": "com.amazonaws.notifications#ManagedNotificationChildEvent", "traits": { - "smithy.api#documentation": "

The aggregation preference of the NotificationConfiguration.

Values:

  • LONG: Aggregate notifications for long periods of time (12 hours).
  • SHORT: Aggregate notifications for short periods of time (5 minutes).
  • NONE: Don't aggregate notifications. No delay in delivery.
" + "smithy.api#documentation": "

The content of the ManagedNotificationChildEvent.

", + "smithy.api#required": {} } } }, @@ -1178,13 +1784,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.notifications#GetNotificationEvent": { + "com.amazonaws.notifications#GetManagedNotificationConfiguration": { "type": "operation", "input": { - "target": "com.amazonaws.notifications#GetNotificationEventRequest" + "target": "com.amazonaws.notifications#GetManagedNotificationConfigurationRequest" }, "output": { - "target": "com.amazonaws.notifications#GetNotificationEventResponse" + "target": "com.amazonaws.notifications#GetManagedNotificationConfigurationResponse" }, "errors": [ { @@ -1206,106 +1812,83 @@ "traits": { "aws.api#controlPlane": {}, "aws.iam#iamAction": { - "documentation": "Grants permission to get a NotificationEvent" + "documentation": "Grants permission to get a Managed Notification Configuration" }, - "smithy.api#documentation": "

Returns a specified NotificationEvent.

User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. GetNotificationEvent only returns notifications stored in the same Region in which the action is called. User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. For more information, see Notification hubs in the AWS User Notifications User Guide.
", + "smithy.api#documentation": "

Returns a specified ManagedNotificationConfiguration.

", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/notification-events/{arn}" + "uri": "/managed-notification-configurations/{arn}" }, "smithy.api#readonly": {} } }, - "com.amazonaws.notifications#GetNotificationEventRequest": { + "com.amazonaws.notifications#GetManagedNotificationConfigurationRequest": { "type": "structure", "members": { "arn": { - "target": "com.amazonaws.notifications#NotificationEventArn", + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationEvent to return.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to return.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } - }, - "locale": { - "target": "com.amazonaws.notifications#LocaleCode", - "traits": { - "smithy.api#documentation": "

The locale code of the language used for the retrieved NotificationEvent. The default locale is English en_US.

", - "smithy.api#httpQuery": "locale" - } } }, "traits": { "smithy.api#input": {} } }, - "com.amazonaws.notifications#GetNotificationEventResponse": { + "com.amazonaws.notifications#GetManagedNotificationConfigurationResponse": { "type": "structure", "members": { "arn": { - "target": "com.amazonaws.notifications#NotificationEventArn", + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", "traits": { - "smithy.api#documentation": "

The ARN of the resource.

", + "smithy.api#documentation": "

The ARN of the ManagedNotificationConfiguration resource.

", "smithy.api#required": {} } }, - "notificationConfigurationArn": { - "target": "com.amazonaws.notifications#NotificationConfigurationArn", + "name": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationName", "traits": { - "smithy.api#documentation": "

The ARN of the NotificationConfiguration.

", + "smithy.api#documentation": "

The name of the ManagedNotificationConfiguration.

", "smithy.api#required": {} } }, - "creationTime": { - "target": "com.amazonaws.notifications#CreationTime", + "description": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationDescription", "traits": { - "smithy.api#documentation": "

The creation time of the NotificationEvent.

", + "smithy.api#documentation": "

The description of the ManagedNotificationConfiguration.

", "smithy.api#required": {} } }, - "content": { - "target": "com.amazonaws.notifications#NotificationEventSchema", + "category": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The content of the NotificationEvent.

", + "smithy.api#documentation": "

The category of the ManagedNotificationConfiguration.

", "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.notifications#InternalServerException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.notifications#ErrorMessage", + }, + "subCategory": { + "target": "smithy.api#String", "traits": { + "smithy.api#documentation": "

The subCategory of the ManagedNotificationConfiguration.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Unexpected error during processing of request.

", - "smithy.api#error": "server", - "smithy.api#httpError": 500, - "smithy.api#retryable": {} - } - }, - "com.amazonaws.notifications#LastActivationTime": { - "type": "timestamp", - "traits": { - "smithy.api#timestampFormat": "date-time" + "smithy.api#output": {} } }, - "com.amazonaws.notifications#ListChannels": { + "com.amazonaws.notifications#GetManagedNotificationEvent": { "type": "operation", "input": { - "target": "com.amazonaws.notifications#ListChannelsRequest" + "target": "com.amazonaws.notifications#GetManagedNotificationEventRequest" }, "output": { - "target": "com.amazonaws.notifications#ListChannelsResponse" + "target": "com.amazonaws.notifications#GetManagedNotificationEventResponse" }, "errors": [ { @@ -1327,50 +1910,33 @@ "traits": { "aws.api#controlPlane": {}, "aws.iam#iamAction": { - "documentation": "Grants permission to list Channels by NotificationConfiguration" + "documentation": "Grants permission to get a Managed NotificationEvent" }, - "smithy.api#documentation": "

Returns a list of Channels for a NotificationConfiguration.

", + "smithy.api#documentation": "

Returns a specified ManagedNotificationEvent.

", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/channels" - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "channels" + "uri": "/managed-notification-events/{arn}" }, "smithy.api#readonly": {} } }, - "com.amazonaws.notifications#ListChannelsRequest": { + "com.amazonaws.notifications#GetManagedNotificationEventRequest": { "type": "structure", "members": { - "notificationConfigurationArn": { - "target": "com.amazonaws.notifications#NotificationConfigurationArn", + "arn": { + "target": "com.amazonaws.notifications#ManagedNotificationEventArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration.

", - "smithy.api#httpQuery": "notificationConfigurationArn", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationEvent to return.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "maxResults": { - "target": "smithy.api#Integer", - "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned in this call. The default value is 20.

", - "smithy.api#httpQuery": "maxResults", - "smithy.api#range": { - "min": 1, - "max": 100 - } - } - }, - "nextToken": { - "target": "com.amazonaws.notifications#NextToken", + "locale": { + "target": "com.amazonaws.notifications#LocaleCode", "traits": { - "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListNotificationEvents call. NextToken uses Base64 encoding.

", - "smithy.api#httpQuery": "nextToken" + "smithy.api#documentation": "

The locale code of the language used for the retrieved ManagedNotificationEvent. The default locale is English (en_US).

", + "smithy.api#httpQuery": "locale" } } }, @@ -1378,19 +1944,34 @@ "smithy.api#input": {} } }, - "com.amazonaws.notifications#ListChannelsResponse": { + "com.amazonaws.notifications#GetManagedNotificationEventResponse": { "type": "structure", "members": { - "nextToken": { - "target": "com.amazonaws.notifications#NextToken", + "arn": { + "target": "com.amazonaws.notifications#ManagedNotificationEventArn", "traits": { - "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" + "smithy.api#documentation": "

The ARN of the resource.

", + "smithy.api#required": {} } }, - "channels": { - "target": "com.amazonaws.notifications#Channels", + "managedNotificationConfigurationArn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", "traits": { - "smithy.api#documentation": "

A list of Channels.

", + "smithy.api#documentation": "

The ARN of the ManagedNotificationConfiguration.

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "com.amazonaws.notifications#CreationTime", + "traits": { + "smithy.api#documentation": "

The creation time of the ManagedNotificationEvent.

", + "smithy.api#required": {} + } + }, + "content": { + "target": "com.amazonaws.notifications#ManagedNotificationEvent", + "traits": { + "smithy.api#documentation": "

The content of the ManagedNotificationEvent.

", "smithy.api#required": {} } } @@ -1399,13 +1980,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.notifications#ListEventRules": { + "com.amazonaws.notifications#GetNotificationConfiguration": { "type": "operation", "input": { - "target": "com.amazonaws.notifications#ListEventRulesRequest" + "target": "com.amazonaws.notifications#GetNotificationConfigurationRequest" }, "output": { - "target": "com.amazonaws.notifications#ListEventRulesResponse" + "target": "com.amazonaws.notifications#GetNotificationConfigurationResponse" }, "errors": [ { @@ -1427,85 +2008,89 @@ "traits": { "aws.api#controlPlane": {}, "aws.iam#iamAction": { - "documentation": "Grants permission to list EventRules" + "documentation": "Grants permission to get a NotificationConfiguration" }, - "smithy.api#documentation": "

Returns a list of EventRules according to specified filters, in reverse chronological order (newest first).

", + "smithy.api#documentation": "

Returns a specified NotificationConfiguration.

", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/event-rules" - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "eventRules" + "uri": "/notification-configurations/{arn}" }, "smithy.api#readonly": {} } }, - "com.amazonaws.notifications#ListEventRulesRequest": { + "com.amazonaws.notifications#GetNotificationConfigurationRequest": { "type": "structure", "members": { - "notificationConfigurationArn": { + "arn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration.

", - "smithy.api#httpQuery": "notificationConfigurationArn", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration to return.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } - }, - "maxResults": { - "target": "smithy.api#Integer", - "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned in this call. The default value is 20.

", - "smithy.api#httpQuery": "maxResults", - "smithy.api#range": { - "min": 1, - "max": 1000 - } - } - }, - "nextToken": { - "target": "com.amazonaws.notifications#NextToken", - "traits": { - "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding.

", - "smithy.api#httpQuery": "nextToken" - } } }, "traits": { "smithy.api#input": {} } }, - "com.amazonaws.notifications#ListEventRulesResponse": { + "com.amazonaws.notifications#GetNotificationConfigurationResponse": { "type": "structure", "members": { - "nextToken": { - "target": "com.amazonaws.notifications#NextToken", + "arn": { + "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" + "smithy.api#documentation": "

The ARN of the resource.

", + "smithy.api#required": {} } }, - "eventRules": { - "target": "com.amazonaws.notifications#EventRules", + "name": { + "target": "com.amazonaws.notifications#NotificationConfigurationName", + "traits": { + "smithy.api#documentation": "

The name of the NotificationConfiguration.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.notifications#NotificationConfigurationDescription", + "traits": { + "smithy.api#documentation": "

The description of the NotificationConfiguration.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.notifications#NotificationConfigurationStatus", + "traits": { + "smithy.api#documentation": "

The status of this NotificationConfiguration.

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "com.amazonaws.notifications#CreationTime", "traits": { - "smithy.api#documentation": "

A list of EventRules.

", + "smithy.api#documentation": "

The creation time of the NotificationConfiguration.

", "smithy.api#required": {} } + }, + "aggregationDuration": { + "target": "com.amazonaws.notifications#AggregationDuration", + "traits": { + "smithy.api#documentation": "

The aggregation preference of the NotificationConfiguration.

Values:
  • LONG: Aggregate notifications for long periods of time (12 hours).
  • SHORT: Aggregate notifications for short periods of time (5 minutes).
  • NONE: Don't aggregate notifications. No delay in delivery.
" + } } }, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.notifications#ListNotificationConfigurations": { + "com.amazonaws.notifications#GetNotificationEvent": { "type": "operation", "input": { - "target": "com.amazonaws.notifications#ListNotificationConfigurationsRequest" + "target": "com.amazonaws.notifications#GetNotificationEventRequest" }, "output": { - "target": "com.amazonaws.notifications#ListNotificationConfigurationsResponse" + "target": "com.amazonaws.notifications#GetNotificationEventResponse" }, "errors": [ { @@ -1514,6 +2099,9 @@ { "target": "com.amazonaws.notifications#InternalServerException" }, + { + "target": "com.amazonaws.notifications#ResourceNotFoundException" + }, { "target": "com.amazonaws.notifications#ThrottlingException" }, @@ -1524,79 +2112,33 @@ "traits": { "aws.api#controlPlane": {}, "aws.iam#iamAction": { - "documentation": "Grants permission to list NotificationConfigurations" + "documentation": "Grants permission to get a NotificationEvent" }, - "smithy.api#documentation": "

Returns a list of abbreviated NotificationConfigurations according to specified filters, in reverse chronological order (newest first).

", + "smithy.api#documentation": "

Returns a specified NotificationEvent.

Note: User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. GetNotificationEvent only returns notifications stored in the same Region in which the action is called. User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. For more information, see Notification hubs in the Amazon Web Services User Notifications User Guide.
", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/notification-configurations" - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "notificationConfigurations" + "uri": "/notification-events/{arn}" }, - "smithy.api#readonly": {}, - "smithy.test#smokeTests": [ - { - "id": "ListNotificationConfigurationsSuccess", - "params": { - "status": "ACTIVE", - "maxResults": 3 - }, - "expect": { - "success": {} - }, - "vendorParamsShape": "aws.test#AwsVendorParams", - "vendorParams": { - "region": "us-east-1" - } - } - ] + "smithy.api#readonly": {} } }, - "com.amazonaws.notifications#ListNotificationConfigurationsRequest": { + "com.amazonaws.notifications#GetNotificationEventRequest": { "type": "structure", "members": { - "eventRuleSource": { - "target": "com.amazonaws.notifications#Source", - "traits": { - "smithy.api#documentation": "

The matched event source.


Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.

", - "smithy.api#httpQuery": "eventRuleSource" - } - }, - "channelArn": { - "target": "com.amazonaws.notifications#ChannelArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Channel to match.

", - "smithy.api#httpQuery": "channelArn" - } - }, - "status": { - "target": "com.amazonaws.notifications#NotificationConfigurationStatus", - "traits": { - "smithy.api#documentation": "

The NotificationConfiguration status to match.

Values:
  • ACTIVE: All EventRules are ACTIVE and any call can be run.
  • PARTIALLY_ACTIVE: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.
  • INACTIVE: All EventRules are INACTIVE and any call can be run.
  • DELETING: This NotificationConfiguration is being deleted. Only GET and LIST calls can be run.
", - "smithy.api#httpQuery": "status" - } - }, - "maxResults": { - "target": "smithy.api#Integer", + "arn": { + "target": "com.amazonaws.notifications#NotificationEventArn", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned in this call. Defaults to 20.

", - "smithy.api#httpQuery": "maxResults", - "smithy.api#range": { - "min": 1, - "max": 100 - } + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationEvent to return.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.notifications#NextToken", + "locale": { + "target": "com.amazonaws.notifications#LocaleCode", "traits": { - "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding.

", - "smithy.api#httpQuery": "nextToken" + "smithy.api#documentation": "

The locale code of the language used for the retrieved NotificationEvent. The default locale is English en_US.

", + "smithy.api#httpQuery": "locale" } } }, @@ -1604,19 +2146,34 @@ "smithy.api#input": {} } }, - "com.amazonaws.notifications#ListNotificationConfigurationsResponse": { + "com.amazonaws.notifications#GetNotificationEventResponse": { "type": "structure", "members": { - "nextToken": { - "target": "com.amazonaws.notifications#NextToken", - "traits": { - "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" + "arn": { + "target": "com.amazonaws.notifications#NotificationEventArn", + "traits": { + "smithy.api#documentation": "

The ARN of the resource.

", + "smithy.api#required": {} } }, - "notificationConfigurations": { - "target": "com.amazonaws.notifications#NotificationConfigurations", + "notificationConfigurationArn": { + "target": "com.amazonaws.notifications#NotificationConfigurationArn", + "traits": { + "smithy.api#documentation": "

The ARN of the NotificationConfiguration.

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "com.amazonaws.notifications#CreationTime", "traits": { - "smithy.api#documentation": "

The NotificationConfigurations in the account.

", + "smithy.api#documentation": "

The creation time of the NotificationEvent.

", + "smithy.api#required": {} + } + }, + "content": { + "target": "com.amazonaws.notifications#NotificationEventSchema", + "traits": { + "smithy.api#documentation": "

The content of the NotificationEvent.

", "smithy.api#required": {} } } @@ -1625,13 +2182,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.notifications#ListNotificationEvents": { + "com.amazonaws.notifications#GetNotificationsAccessForOrganization": { "type": "operation", "input": { - "target": "com.amazonaws.notifications#ListNotificationEventsRequest" + "target": "com.amazonaws.notifications#GetNotificationsAccessForOrganizationRequest" }, "output": { - "target": "com.amazonaws.notifications#ListNotificationEventsResponse" + "target": "com.amazonaws.notifications#GetNotificationsAccessForOrganizationResponse" }, "errors": [ { @@ -1650,90 +2207,122 @@ "traits": { "aws.api#controlPlane": {}, "aws.iam#iamAction": { - "documentation": "Grants permission to list NotificationEvents" + "documentation": "Grants permission to read Service Trust for AWS User Notifications" }, - "smithy.api#documentation": "

Returns a list of NotificationEvents according to specified filters, in reverse chronological order (newest first).

Note: User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. ListNotificationEvents only returns notifications stored in the same Region in which the action is called. User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. For more information, see Notification hubs in the AWS User Notifications User Guide.
", + "smithy.api#documentation": "

Returns the AccessStatus of Service Trust Enablement for User Notifications and Amazon Web Services Organizations.

", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/notification-events" - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "notificationEvents" + "uri": "/organization/access" }, - "smithy.api#readonly": {}, - "smithy.test#smokeTests": [ - { - "id": "ListNotificationEventsSuccess", - "params": { - "includeChildEvents": true, - "maxResults": 3 - }, - "expect": { - "success": {} - }, - "vendorParamsShape": "aws.test#AwsVendorParams", - "vendorParams": { - "region": "us-east-1" - } - } - ] + "smithy.api#readonly": {} } }, - "com.amazonaws.notifications#ListNotificationEventsRequest": { + "com.amazonaws.notifications#GetNotificationsAccessForOrganizationRequest": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#GetNotificationsAccessForOrganizationResponse": { "type": "structure", "members": { - "startTime": { - "target": "smithy.api#Timestamp", + "notificationsAccessForOrganization": { + "target": "com.amazonaws.notifications#NotificationsAccessForOrganization", "traits": { - "smithy.api#documentation": "

The earliest time of events to return from this call.

", - "smithy.api#httpQuery": "startTime", - "smithy.api#timestampFormat": "date-time" + "smithy.api#documentation": "

The AccessStatus of Service Trust Enablement for User Notifications to Amazon Web Services Organizations.

", + "smithy.api#required": {} } - }, - "endTime": { - "target": "smithy.api#Timestamp", + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.notifications#ErrorMessage", "traits": { - "smithy.api#documentation": "

Latest time of events to return from this call.

", - "smithy.api#httpQuery": "endTime", - "smithy.api#timestampFormat": "date-time" + "smithy.api#required": {} } + } + }, + "traits": { + "smithy.api#documentation": "

Unexpected error during processing of request.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.notifications#LastActivationTime": { + "type": "timestamp", + "traits": { + "smithy.api#timestampFormat": "date-time" + } + }, + "com.amazonaws.notifications#ListChannels": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#ListChannelsRequest" + }, + "output": { + "target": "com.amazonaws.notifications#ListChannelsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" }, - "locale": { - "target": "com.amazonaws.notifications#LocaleCode", - "traits": { - "smithy.api#documentation": "

The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US).

", - "smithy.api#httpQuery": "locale" - } + { + "target": "com.amazonaws.notifications#InternalServerException" }, - "source": { - "target": "com.amazonaws.notifications#Source", - "traits": { - "smithy.api#documentation": "

The matched event source.


Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.

", - "smithy.api#httpQuery": "source" - } + { + "target": "com.amazonaws.notifications#ResourceNotFoundException" }, - "includeChildEvents": { - "target": "smithy.api#Boolean", - "traits": { - "smithy.api#documentation": "

Include aggregated child events in the result.

", - "smithy.api#httpQuery": "includeChildEvents" - } + { + "target": "com.amazonaws.notifications#ThrottlingException" }, - "aggregateNotificationEventArn": { - "target": "com.amazonaws.notifications#NotificationEventArn", + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to list Channels by NotificationConfiguration" + }, + "smithy.api#documentation": "

Returns a list of Channels for a NotificationConfiguration.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/channels" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "channels" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.notifications#ListChannelsRequest": { + "type": "structure", + "members": { + "notificationConfigurationArn": { + "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the aggregatedNotificationEventArn to match.

", - "smithy.api#httpQuery": "aggregateNotificationEventArn" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration.

", + "smithy.api#httpQuery": "notificationConfigurationArn", + "smithy.api#required": {} } }, "maxResults": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned in this call. Defaults to 20.

", + "smithy.api#documentation": "

The maximum number of results to be returned in this call. The default value is 20.

", "smithy.api#httpQuery": "maxResults", "smithy.api#range": { "min": 1, @@ -1744,7 +2333,7 @@ "nextToken": { "target": "com.amazonaws.notifications#NextToken", "traits": { - "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding.

", + "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListChannels call. NextToken uses Base64 encoding.

", "smithy.api#httpQuery": "nextToken" } } @@ -1753,7 +2342,7 @@ "smithy.api#input": {} } }, - "com.amazonaws.notifications#ListNotificationEventsResponse": { + "com.amazonaws.notifications#ListChannelsResponse": { "type": "structure", "members": { "nextToken": { @@ -1762,10 +2351,10 @@ "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" } }, - "notificationEvents": { - "target": "com.amazonaws.notifications#NotificationEvents", + "channels": { + "target": "com.amazonaws.notifications#Channels", "traits": { - "smithy.api#documentation": "

The list of notification events.

", + "smithy.api#documentation": "

A list of Channels.

", "smithy.api#required": {} } } @@ -1774,13 +2363,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.notifications#ListNotificationHubs": { + "com.amazonaws.notifications#ListEventRules": { "type": "operation", "input": { - "target": "com.amazonaws.notifications#ListNotificationHubsRequest" + "target": "com.amazonaws.notifications#ListEventRulesRequest" }, "output": { - "target": "com.amazonaws.notifications#ListNotificationHubsResponse" + "target": "com.amazonaws.notifications#ListEventRulesResponse" }, "errors": [ { @@ -1789,6 +2378,9 @@ { "target": "com.amazonaws.notifications#InternalServerException" }, + { + "target": "com.amazonaws.notifications#ResourceNotFoundException" + }, { "target": "com.amazonaws.notifications#ThrottlingException" }, @@ -1799,56 +2391,49 @@ "traits": { "aws.api#controlPlane": {}, "aws.iam#iamAction": { - "documentation": "Grants permission to list NotificationHubs" + "documentation": "Grants permission to list EventRules" }, - "smithy.api#documentation": "

Returns a list of NotificationHubs.

", + "smithy.api#documentation": "

Returns a list of EventRules according to specified filters, in reverse chronological order (newest first).

", "smithy.api#http": { "code": 200, "method": "GET", - "uri": "/notification-hubs" + "uri": "/event-rules" }, "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", "pageSize": "maxResults", - "items": "notificationHubs" + "items": "eventRules" }, - "smithy.api#readonly": {}, - "smithy.test#smokeTests": [ - { - "id": "ListNotificationHubsSuccess", - "params": { - "maxResults": 3 - }, - "expect": { - "success": {} - }, - "vendorParamsShape": "aws.test#AwsVendorParams", - "vendorParams": { - "region": "us-east-1" - } - } - ] + "smithy.api#readonly": {} } }, - "com.amazonaws.notifications#ListNotificationHubsRequest": { + "com.amazonaws.notifications#ListEventRulesRequest": { "type": "structure", "members": { + "notificationConfigurationArn": { + "target": "com.amazonaws.notifications#NotificationConfigurationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration.

", + "smithy.api#httpQuery": "notificationConfigurationArn", + "smithy.api#required": {} + } + }, "maxResults": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of records to list in a single response.

", + "smithy.api#documentation": "

The maximum number of results to be returned in this call. The default value is 20.

", "smithy.api#httpQuery": "maxResults", "smithy.api#range": { - "min": 3, - "max": 3 + "min": 1, + "max": 1000 } } }, "nextToken": { "target": "com.amazonaws.notifications#NextToken", "traits": { - "smithy.api#documentation": "

A pagination token. Set to null to start listing notification hubs from the start.

", + "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding.

", "smithy.api#httpQuery": "nextToken" } } @@ -1857,34 +2442,34 @@ "smithy.api#input": {} } }, - "com.amazonaws.notifications#ListNotificationHubsResponse": { + "com.amazonaws.notifications#ListEventRulesResponse": { "type": "structure", "members": { - "notificationHubs": { - "target": "com.amazonaws.notifications#NotificationHubs", - "traits": { - "smithy.api#documentation": "

The NotificationHubs in the account.

", - "smithy.api#required": {} - } - }, "nextToken": { "target": "com.amazonaws.notifications#NextToken", "traits": { "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" } + }, + "eventRules": { + "target": "com.amazonaws.notifications#EventRules", + "traits": { + "smithy.api#documentation": "

A list of EventRules.

", + "smithy.api#required": {} + } } }, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.notifications#ListTagsForResource": { + "com.amazonaws.notifications#ListManagedNotificationChannelAssociations": { "type": "operation", "input": { - "target": "com.amazonaws.notifications#ListTagsForResourceRequest" + "target": "com.amazonaws.notifications#ListManagedNotificationChannelAssociationsRequest" }, "output": { - "target": "com.amazonaws.notifications#ListTagsForResourceResponse" + "target": "com.amazonaws.notifications#ListManagedNotificationChannelAssociationsResponse" }, "errors": [ { @@ -1906,129 +2491,1606 @@ "traits": { "aws.api#controlPlane": {}, "aws.iam#iamAction": { - "documentation": "Grants permission to get tags for a resource" + "documentation": "Grants permission to list Account contacts and Channels associated with a Managed Notification Configuration" }, - "smithy.api#documentation": "

Returns a list of tags for a specified Amazon Resource Name (ARN).

For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide.

Note: This is only supported for NotificationConfigurations.
", + "smithy.api#documentation": "

Returns a list of Account contacts and Channels associated with a ManagedNotificationConfiguration, in paginated format.

", "smithy.api#http": { + "code": 200, "method": "GET", - "uri": "/tags/{arn}" + "uri": "/channels/list-managed-notification-channel-associations" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "channelAssociations" }, "smithy.api#readonly": {} } }, - "com.amazonaws.notifications#ListTagsForResourceRequest": { + "com.amazonaws.notifications#ListManagedNotificationChannelAssociationsRequest": { "type": "structure", "members": { - "arn": { - "target": "com.amazonaws.notifications#NotificationConfigurationArn", + "managedNotificationConfigurationArn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) to use to list tags.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration to match.

", + "smithy.api#httpQuery": "managedNotificationConfigurationArn", "smithy.api#required": {} } - } + }, + "maxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned in this call. Defaults to 20.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationChannelAssociations call.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#ListManagedNotificationChannelAssociationsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" + } + }, + "channelAssociations": { + "target": "com.amazonaws.notifications#ManagedNotificationChannelAssociations", + "traits": { + "smithy.api#documentation": "

A list of the channel associations for the specified ManagedNotificationConfiguration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#ListManagedNotificationChildEvents": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#ListManagedNotificationChildEventsRequest" + }, + "output": { + "target": "com.amazonaws.notifications#ListManagedNotificationChildEventsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to list Managed Notification Child Events" + }, + "smithy.api#documentation": "

Returns a list of ManagedNotificationChildEvents for a specified aggregate ManagedNotificationEvent, ordered by creation time in reverse chronological order (newest first).

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/list-managed-notification-child-events/{aggregateManagedNotificationEventArn}" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "managedNotificationChildEvents" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.notifications#ListManagedNotificationChildEventsRequest": { + "type": "structure", + "members": { + "aggregateManagedNotificationEventArn": { + "target": "com.amazonaws.notifications#ManagedNotificationEventArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationEvent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "startTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The earliest time of events to return from this call.

", + "smithy.api#httpQuery": "startTime", + "smithy.api#timestampFormat": "date-time" + } + }, + "endTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The latest time of events to return from this call.

", + "smithy.api#httpQuery": "endTime", + "smithy.api#timestampFormat": "date-time" + } + }, + "locale": { + "target": "com.amazonaws.notifications#LocaleCode", + "traits": { + "smithy.api#documentation": "

The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US).

", + "smithy.api#httpQuery": "locale" + } + }, + "maxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned in this call. Defaults to 20.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "relatedAccount": { + "target": "com.amazonaws.notifications#AccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the Managed Notification Child Events.

", + "smithy.api#httpQuery": "relatedAccount" + } + }, + "organizationalUnitId": { + "target": "com.amazonaws.notifications#OrganizationalUnitId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Web Services Organizations organizational unit (OU) associated with the Managed Notification Child Events.

", + "smithy.api#httpQuery": "organizationalUnitId" + } + }, + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationChildEvents call. Next token uses Base64 encoding.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#ListManagedNotificationChildEventsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" + } + }, + "managedNotificationChildEvents": { + "target": "com.amazonaws.notifications#ManagedNotificationChildEvents", + "traits": { + "smithy.api#documentation": "

A list of ManagedNotificationChildEvents matching the request criteria.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#ListManagedNotificationConfigurations": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#ListManagedNotificationConfigurationsRequest" + }, + "output": { + "target": "com.amazonaws.notifications#ListManagedNotificationConfigurationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to list Managed Notification Configurations" + }, + "smithy.api#documentation": "

Returns a list of Managed Notification Configurations according to specified filters, ordered by creation time in reverse chronological order (newest first).

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/managed-notification-configurations" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "managedNotificationConfigurations" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.notifications#ListManagedNotificationConfigurationsRequest": { + "type": "structure", + "members": { + "channelIdentifier": { + "target": "com.amazonaws.notifications#ChannelIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier or ARN of the notification channel to filter configurations by.

", + "smithy.api#httpQuery": "channelIdentifier" + } + }, + "maxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned in this call. Defaults to 20.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationConfigurations call. Next token uses Base64 encoding.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#ListManagedNotificationConfigurationsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" + } + }, + "managedNotificationConfigurations": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurations", + "traits": { + "smithy.api#documentation": "

A list of Managed Notification Configurations matching the request criteria.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#ListManagedNotificationEvents": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#ListManagedNotificationEventsRequest" + }, + "output": { + "target": "com.amazonaws.notifications#ListManagedNotificationEventsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to list Managed Notification Events" + }, + "smithy.api#documentation": "

Returns a list of Managed Notification Events according to specified filters, ordered by creation time in reverse chronological order (newest first).

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/managed-notification-events" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "managedNotificationEvents" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "ListManagedNotificationEventsSuccess", + "params": { + "maxResults": 3 + }, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.notifications#ListManagedNotificationEventsRequest": { + "type": "structure", + "members": { + "startTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The earliest time of events to return from this call.

", + "smithy.api#httpQuery": "startTime", + "smithy.api#timestampFormat": "date-time" + } + }, + "endTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The latest time of events to return from this call.

", + "smithy.api#httpQuery": "endTime", + "smithy.api#timestampFormat": "date-time" + } + }, + "locale": { + "target": "com.amazonaws.notifications#LocaleCode", + "traits": { + "smithy.api#documentation": "

The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US).

", + "smithy.api#httpQuery": "locale" + } + }, + "source": { + "target": "com.amazonaws.notifications#Source", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services service the event originates from. For example, aws.cloudwatch.

", + "smithy.api#httpQuery": "source" + } + }, + "maxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned in this call. Defaults to 20.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListManagedNotificationEvents call. Next token uses Base64 encoding.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "organizationalUnitId": { + "target": "com.amazonaws.notifications#OrganizationalUnitId", + "traits": { + "smithy.api#documentation": "

The Organizational Unit Id that an Amazon Web Services account belongs to.

", + "smithy.api#httpQuery": "organizationalUnitId" + } + }, + "relatedAccount": { + "target": "com.amazonaws.notifications#AccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the Managed Notification Events.

", + "smithy.api#httpQuery": "relatedAccount" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#ListManagedNotificationEventsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" + } + }, + "managedNotificationEvents": { + "target": "com.amazonaws.notifications#ManagedNotificationEvents", + "traits": { + "smithy.api#documentation": "

A list of Managed Notification Events matching the request criteria.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#ListNotificationConfigurations": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#ListNotificationConfigurationsRequest" + }, + "output": { + "target": "com.amazonaws.notifications#ListNotificationConfigurationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to list NotificationConfigurations" + }, + "smithy.api#documentation": "

Returns a list of abbreviated NotificationConfigurations according to specified filters, in reverse chronological order (newest first).

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/notification-configurations" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "notificationConfigurations" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "ListNotificationConfigurationsSuccess", + "params": { + "status": "ACTIVE", + "maxResults": 3 + }, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.notifications#ListNotificationConfigurationsRequest": { + "type": "structure", + "members": { + "eventRuleSource": { + "target": "com.amazonaws.notifications#Source", + "traits": { + "smithy.api#documentation": "

The matched event source.


Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.

", + "smithy.api#httpQuery": "eventRuleSource" + } + }, + "channelArn": { + "target": "com.amazonaws.notifications#ChannelArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Channel to match.

", + "smithy.api#httpQuery": "channelArn" + } + }, + "status": { + "target": "com.amazonaws.notifications#NotificationConfigurationStatus", + "traits": { + "smithy.api#documentation": "

The NotificationConfiguration status to match.

Values:
  • ACTIVE: All EventRules are ACTIVE and any call can be run.
  • PARTIALLY_ACTIVE: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.
  • INACTIVE: All EventRules are INACTIVE and any call can be run.
  • DELETING: This NotificationConfiguration is being deleted. Only GET and LIST calls can be run.
", + "smithy.api#httpQuery": "status" + } + }, + "maxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned in this call. Defaults to 20.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListNotificationConfigurations call. Next token uses Base64 encoding.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#ListNotificationConfigurationsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" + } + }, + "notificationConfigurations": { + "target": "com.amazonaws.notifications#NotificationConfigurations", + "traits": { + "smithy.api#documentation": "

The NotificationConfigurations in the account.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#ListNotificationEvents": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#ListNotificationEventsRequest" + }, + "output": { + "target": "com.amazonaws.notifications#ListNotificationEventsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to list NotificationEvents" + }, + "smithy.api#documentation": "

Returns a list of NotificationEvents according to specified filters, in reverse chronological order (newest first).

Note: User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. ListNotificationEvents only returns notifications stored in the same Region in which the action is called. User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. For more information, see Notification hubs in the Amazon Web Services User Notifications User Guide.
", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/notification-events" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "notificationEvents" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "ListNotificationEventsSuccess", + "params": { + "includeChildEvents": true, + "maxResults": 3 + }, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.notifications#ListNotificationEventsRequest": { + "type": "structure", + "members": { + "startTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The earliest time of events to return from this call.

", + "smithy.api#httpQuery": "startTime", + "smithy.api#timestampFormat": "date-time" + } + }, + "endTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The latest time of events to return from this call.

", + "smithy.api#httpQuery": "endTime", + "smithy.api#timestampFormat": "date-time" + } + }, + "locale": { + "target": "com.amazonaws.notifications#LocaleCode", + "traits": { + "smithy.api#documentation": "

The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US).

", + "smithy.api#httpQuery": "locale" + } + }, + "source": { + "target": "com.amazonaws.notifications#Source", + "traits": { + "smithy.api#documentation": "

The matched event source.


Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.

", + "smithy.api#httpQuery": "source" + } + }, + "includeChildEvents": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Include aggregated child events in the result.

", + "smithy.api#httpQuery": "includeChildEvents" + } + }, + "aggregateNotificationEventArn": { + "target": "com.amazonaws.notifications#NotificationEventArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the aggregatedNotificationEventArn to match.

", + "smithy.api#httpQuery": "aggregateNotificationEventArn" + } + }, + "maxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned in this call. Defaults to 20.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

The start token for paginated calls. Retrieved from the response of a previous ListNotificationEvents call. Next token uses Base64 encoding.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#ListNotificationEventsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.

" + } + }, + "notificationEvents": { + "target": "com.amazonaws.notifications#NotificationEvents", + "traits": { + "smithy.api#documentation": "

The list of notification events.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#ListNotificationHubs": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#ListNotificationHubsRequest" + }, + "output": { + "target": "com.amazonaws.notifications#ListNotificationHubsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to list NotificationHubs" + }, + "smithy.api#documentation": "

Returns a list of NotificationHubs.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/notification-hubs" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "notificationHubs" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "ListNotificationHubsSuccess", + "params": { + "maxResults": 3 + }, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.notifications#ListNotificationHubsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of records to list in a single response.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 3, + "max": 3 + } + } + }, + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

A pagination token. Set to null to begin listing notification hubs from the first entry.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#ListNotificationHubsResponse": { + "type": "structure", + "members": { + "notificationHubs": { + "target": "com.amazonaws.notifications#NotificationHubs", + "traits": { + "smithy.api#documentation": "

The NotificationHubs in the account.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.notifications#NextToken", + "traits": { + "smithy.api#documentation": "

A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
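Given a configured client like the one in the first sketch, ListNotificationHubs is a single call. Note that the range trait above constrains maxResults to min 3, max 3, so 3 is effectively the only accepted value.

```swift
// Sketch: list the account's notification hubs (generated signature assumed).
let hubs = try await notifications.listNotificationHubs(maxResults: 3)
for hub in hubs.notificationHubs {
    print(hub)  // a NotificationHubOverview: Region, status summary, timestamps
}
```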

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.notifications#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.notifications#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.notifications#AccessDeniedException" + }, + { + "target": "com.amazonaws.notifications#InternalServerException" + }, + { + "target": "com.amazonaws.notifications#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.notifications#ThrottlingException" + }, + { + "target": "com.amazonaws.notifications#ValidationException" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#iamAction": { + "documentation": "Grants permission to get tags for a resource" + }, + "smithy.api#documentation": "

Returns a list of tags for a specified Amazon Resource Name (ARN).

For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide.

This is only supported for NotificationConfigurations.
", + "smithy.api#http": { + "method": "GET", + "uri": "/tags/{arn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.notifications#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.notifications#NotificationConfigurationArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) to use to list tags.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.notifications#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.notifications#TagMap", + "traits": { + "smithy.api#documentation": "

A list of tags for the specified ARN.
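ListTagsForResource follows the same pattern; per the note above it only supports NotificationConfiguration ARNs. The ARN value below is hypothetical.

```swift
// Sketch: read tags off a NotificationConfiguration (ARN hypothetical).
let arn = "arn:aws:notifications:us-east-1:111122223333:configuration/example"
let tagged = try await notifications.listTagsForResource(arn: arn)
print(tagged.tags ?? [:])
```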

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.notifications#LocaleCode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "name": "de_DE", + "value": "de_DE", + "documentation": "German (Germany)" + }, + { + "name": "en_CA", + "value": "en_CA", + "documentation": "English (Canada)" + }, + { + "name": "en_US", + "value": "en_US", + "documentation": "English (United States). This is the default locale." + }, + { + "name": "en_UK", + "value": "en_UK", + "documentation": "English (United Kingdom)" + }, + { + "name": "es_ES", + "value": "es_ES", + "documentation": "Spanish (Spain)" + }, + { + "name": "fr_CA", + "value": "fr_CA", + "documentation": "French (Canada)" + }, + { + "name": "fr_FR", + "value": "fr_FR", + "documentation": "French (France)" + }, + { + "name": "id_ID", + "value": "id_ID", + "documentation": "Bahasa Indonesian (Indonesia)" + }, + { + "name": "it_IT", + "value": "it_IT", + "documentation": "Italian (Italy)" + }, + { + "name": "ja_JP", + "value": "ja_JP", + "documentation": "Japanese (Japan)" + }, + { + "name": "ko_KR", + "value": "ko_KR", + "documentation": "Korean (Korea)" + }, + { + "name": "pt_BR", + "value": "pt_BR", + "documentation": "Portuguese (Brazil)" + }, + { + "name": "tr_TR", + "value": "tr_TR", + "documentation": "Turkish (Turkey)" + }, + { + "name": "zh_CN", + "value": "zh_CN", + "documentation": "Chinese (China)" + }, + { + "name": "zh_TW", + "value": "zh_TW", + "documentation": "Chinese (Taiwan)" + } + ], + "smithy.api#suppress": [ + "EnumTrait" + ] + } + }, + "com.amazonaws.notifications#ManagedNotificationAccountContactAssociation": { + "type": "resource", + "identifiers": { + "managedNotificationConfigurationArn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn" + }, + "contactIdentifier": { + "target": "com.amazonaws.notifications#AccountContactType" + } + }, + "put": { + "target": "com.amazonaws.notifications#AssociateManagedNotificationAccountContact" + }, + "delete": { + "target": "com.amazonaws.notifications#DisassociateManagedNotificationAccountContact" + }, + "traits": { + "aws.cloudformation#cfnResource": {} + } + }, + "com.amazonaws.notifications#ManagedNotificationAdditionalChannelAssociation": { + "type": "resource", + "identifiers": { + "managedNotificationConfigurationArn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn" + }, + "channelArn": { + "target": "com.amazonaws.notifications#ChannelArn" + } + }, + "put": { + "target": "com.amazonaws.notifications#AssociateManagedNotificationAdditionalChannel" + }, + "delete": { + "target": "com.amazonaws.notifications#DisassociateManagedNotificationAdditionalChannel" + }, + "traits": { + "aws.cloudformation#cfnResource": {} + } + }, + "com.amazonaws.notifications#ManagedNotificationChannelAssociationSummary": { + "type": "structure", + "members": { + "channelIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier for the notification channel.

", + "smithy.api#required": {} + } + }, + "channelType": { + "target": "com.amazonaws.notifications#ChannelType", + "traits": { + "smithy.api#documentation": "

The type of notification channel used for message delivery.

Values:
  • ACCOUNT_CONTACT: Delivers notifications to Account Managed contacts through the User Notification Service.
  • MOBILE: Delivers notifications through the Amazon Web Services Console Mobile Application to mobile devices.
  • CHATBOT: Delivers notifications through Chatbot to collaboration platforms (Slack, Chime).
  • EMAIL: Delivers notifications to email addresses.
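In the generated Swift client these values would surface as a ChannelType string enum; a consumer might branch on them as in this sketch (the type and case names are assumptions based on Soto's naming conventions).

```swift
// Sketch: human-readable labels for the channel types listed above.
func label(for channelType: Notifications.ChannelType) -> String {
    switch channelType {
    case .accountContact: return "Account contact (User Notification Service)"
    case .mobile:         return "Console Mobile Application"
    case .chatbot:        return "Chatbot (Slack, Chime)"
    case .email:          return "Email"
    default:              return "Unrecognized channel type"  // in case the enum grows
    }
}
```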
", + "smithy.api#required": {} + } + }, + "overrideOption": { + "target": "com.amazonaws.notifications#ChannelAssociationOverrideOption", + "traits": { + "smithy.api#documentation": "

Controls whether users can modify channel associations for a notification configuration.

Values:
  • ENABLED: Users can associate or disassociate channels with the notification configuration.
  • DISABLED: Users cannot associate or disassociate channels with the notification configuration.
" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides a summary of channel associations for a managed notification configuration.

" + } + }, + "com.amazonaws.notifications#ManagedNotificationChannelAssociations": { + "type": "list", + "member": { + "target": "com.amazonaws.notifications#ManagedNotificationChannelAssociationSummary" + } + }, + "com.amazonaws.notifications#ManagedNotificationChildEvent": { + "type": "structure", + "members": { + "schemaVersion": { + "target": "com.amazonaws.notifications#SchemaVersion", + "traits": { + "smithy.api#documentation": "

The schema version of the Managed Notification Child Event.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.notifications#NotificationEventId", + "traits": { + "smithy.api#documentation": "

The unique identifier for a Managed Notification Child Event.

", + "smithy.api#required": {} + } + }, + "messageComponents": { + "target": "com.amazonaws.notifications#MessageComponents", + "traits": { + "smithy.api#required": {} + } + }, + "sourceEventDetailUrl": { + "target": "com.amazonaws.notifications#Url", + "traits": { + "smithy.api#documentation": "

The source event URL.

" + } + }, + "sourceEventDetailUrlDisplayText": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The display text that is hyperlinked with the sourceEventDetailUrl.

" + } + }, + "notificationType": { + "target": "com.amazonaws.notifications#NotificationType", + "traits": { + "smithy.api#documentation": "

The type of event causing the notification.

Values:
  • ALERT: A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached.
  • WARNING: A notification about an event where an issue is about to arise. For example, something is approaching a threshold.
  • ANNOUNCEMENT: A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated.
  • INFORMATIONAL: A notification about informational messages. For example, recommendations, service announcements, or reminders.
", + "smithy.api#required": {} + } + }, + "eventStatus": { + "target": "com.amazonaws.notifications#EventStatus", + "traits": { + "smithy.api#documentation": "

The assessed nature of the event.

Values:
  • HEALTHY: All EventRules are ACTIVE.
  • UNHEALTHY: Some EventRules are ACTIVE and some are INACTIVE.
" + } + }, + "aggregateManagedNotificationEventArn": { + "target": "com.amazonaws.notifications#ManagedNotificationEventArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationEvent that is associated with this Managed Notification Child Event.

", + "smithy.api#required": {} + } + }, + "startTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The notification event start time.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "endTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end time of the event.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "textParts": { + "target": "com.amazonaws.notifications#TextParts", + "traits": { + "smithy.api#documentation": "

A list of text values.

", + "smithy.api#required": {} + } + }, + "organizationalUnitId": { + "target": "com.amazonaws.notifications#OrganizationalUnitId", + "traits": { + "smithy.api#documentation": "

The Organizational Unit Id that an Amazon Web Services account belongs to.

" + } + }, + "aggregationDetail": { + "target": "com.amazonaws.notifications#AggregationDetail", + "traits": { + "smithy.api#documentation": "

Provides detailed information about the dimensions used for event summarization and aggregation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A ManagedNotificationChildEvent is a notification-focused representation of an event. It contains semantic information used to create aggregated or non-aggregated end-user notifications.
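The ListManagedNotificationChildEvents operation referenced by the resource definition below pages through these child events; a sketch with assumed parameter and member names:

```swift
// Sketch: list the child events aggregated under one managed notification event.
let children = try await notifications.listManagedNotificationChildEvents(
    aggregateManagedNotificationEventArn: parentEventArn  // hypothetical ARN value
)
for overview in children.managedNotificationChildEvents {
    print(overview.creationTime, overview.childEvent.notificationType)
}
```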

" + } + }, + "com.amazonaws.notifications#ManagedNotificationChildEventArn": { + "type": "string", + "traits": { + "aws.api#arnReference": { + "service": "com.amazonaws.notifications#Notifications", + "resource": "com.amazonaws.notifications#ManagedNotificationChildEvent" + }, + "smithy.api#pattern": "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}/event/[a-z0-9]{27}/child-event/[a-z0-9]{27}$" + } + }, + "com.amazonaws.notifications#ManagedNotificationChildEventOverview": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.notifications#ManagedNotificationEventArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationChildEvent.

", + "smithy.api#required": {} + } + }, + "managedNotificationConfigurationArn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration.

", + "smithy.api#required": {} + } + }, + "relatedAccount": { + "target": "com.amazonaws.notifications#AccountId", + "traits": { + "smithy.api#documentation": "

The account related to the ManagedNotificationChildEvent.

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "com.amazonaws.notifications#CreationTime", + "traits": { + "smithy.api#documentation": "

The creation time of the ManagedNotificationChildEvent.

", + "smithy.api#required": {} + } + }, + "childEvent": { + "target": "com.amazonaws.notifications#ManagedNotificationChildEventSummary", + "traits": { + "smithy.api#documentation": "

The content of the ManagedNotificationChildEvent.

", + "smithy.api#required": {} + } + }, + "aggregateManagedNotificationEventArn": { + "target": "com.amazonaws.notifications#ManagedNotificationEventArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationEvent that is associated with this ManagedNotificationChildEvent.

", + "smithy.api#required": {} + } + }, + "organizationalUnitId": { + "target": "com.amazonaws.notifications#OrganizationalUnitId", + "traits": { + "smithy.api#documentation": "

The Organizational Unit Id that an Amazon Web Services account belongs to.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes an overview and metadata for a ManagedNotificationChildEvent.

" + } + }, + "com.amazonaws.notifications#ManagedNotificationChildEventResource": { + "type": "resource", + "identifiers": { + "arn": { + "target": "com.amazonaws.notifications#ManagedNotificationChildEventArn" + } + }, + "read": { + "target": "com.amazonaws.notifications#GetManagedNotificationChildEvent" + }, + "list": { + "target": "com.amazonaws.notifications#ListManagedNotificationChildEvents" + }, + "traits": { + "aws.api#arn": { + "template": "{arn}", + "absolute": true + } + } + }, + "com.amazonaws.notifications#ManagedNotificationChildEventSummary": { + "type": "structure", + "members": { + "schemaVersion": { + "target": "com.amazonaws.notifications#SchemaVersion", + "traits": { + "smithy.api#documentation": "

The schema version of the ManagedNotificationChildEvent.

", + "smithy.api#required": {} + } + }, + "sourceEventMetadata": { + "target": "com.amazonaws.notifications#ManagedSourceEventMetadataSummary", + "traits": { + "smithy.api#documentation": "

Contains all event metadata present identically across all NotificationEvents. All fields are present in Source Events via EventBridge.

", + "smithy.api#required": {} + } + }, + "messageComponents": { + "target": "com.amazonaws.notifications#MessageComponentsSummary", + "traits": { + "smithy.api#required": {} + } + }, + "aggregationDetail": { + "target": "com.amazonaws.notifications#AggregationDetail", + "traits": { + "smithy.api#documentation": "

Provides detailed information about the dimensions used for event summarization and aggregation.

", + "smithy.api#required": {} + } + }, + "eventStatus": { + "target": "com.amazonaws.notifications#EventStatus", + "traits": { + "smithy.api#documentation": "

The perceived nature of the event.

Values:
  • HEALTHY: All EventRules are ACTIVE and any call can be run.
  • UNHEALTHY: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.
", + "smithy.api#required": {} + } + }, + "notificationType": { + "target": "com.amazonaws.notifications#NotificationType", + "traits": { + "smithy.api#documentation": "

The type of the event causing this notification.

Values:
  • ALERT: A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached.
  • WARNING: A notification about an event where an issue is about to arise. For example, something is approaching a threshold.
  • ANNOUNCEMENT: A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated.
  • INFORMATIONAL: A notification about informational messages. For example, recommendations, service announcements, or reminders.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes a short summary and metadata for a ManagedNotificationChildEvent.

" + } + }, + "com.amazonaws.notifications#ManagedNotificationChildEvents": { + "type": "list", + "member": { + "target": "com.amazonaws.notifications#ManagedNotificationChildEventOverview" + } + }, + "com.amazonaws.notifications#ManagedNotificationConfiguration": { + "type": "resource", + "identifiers": { + "arn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn" + } + }, + "read": { + "target": "com.amazonaws.notifications#GetManagedNotificationConfiguration" + }, + "list": { + "target": "com.amazonaws.notifications#ListManagedNotificationConfigurations" + }, + "traits": { + "aws.api#arn": { + "template": "{arn}", + "absolute": true + }, + "aws.cloudformation#cfnResource": {} + } + }, + "com.amazonaws.notifications#ManagedNotificationConfigurationDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[^\\u0001-\\u001F\\u007F-\\u009F]*$" + } + }, + "com.amazonaws.notifications#ManagedNotificationConfigurationName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[A-Za-z0-9\\-]+$" + } + }, + "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}$" + } + }, + "com.amazonaws.notifications#ManagedNotificationConfigurationStructure": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationName", + "traits": { + "smithy.api#documentation": "

The name of the ManagedNotificationConfiguration.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationDescription", + "traits": { + "smithy.api#documentation": "

The description of the ManagedNotificationConfiguration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the basic structure and properties of a ManagedNotificationConfiguration.
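Enumerating the managed configurations available to the account is a read-only list call; the response member name below is assumed from the list type that follows.

```swift
// Sketch: print the Amazon Web Services managed notification configurations.
let configs = try await notifications.listManagedNotificationConfigurations()
for config in configs.managedNotificationConfigurations {
    print(config.name, "-", config.description)
}
```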

" + } + }, + "com.amazonaws.notifications#ManagedNotificationConfigurations": { + "type": "list", + "member": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationStructure" + } + }, + "com.amazonaws.notifications#ManagedNotificationEvent": { + "type": "structure", + "members": { + "schemaVersion": { + "target": "com.amazonaws.notifications#SchemaVersion", + "traits": { + "smithy.api#documentation": "

Version of the ManagedNotificationEvent schema.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.notifications#NotificationEventId", + "traits": { + "smithy.api#documentation": "

Unique identifier for a ManagedNotificationEvent.

", + "smithy.api#required": {} + } + }, + "messageComponents": { + "target": "com.amazonaws.notifications#MessageComponents", + "traits": { + "smithy.api#required": {} + } + }, + "sourceEventDetailUrl": { + "target": "com.amazonaws.notifications#Url", + "traits": { + "smithy.api#documentation": "

URL defined by the source service, used by notification consumers to get additional information about the event.

" + } + }, + "sourceEventDetailUrlDisplayText": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Text that needs to be hyperlinked with the sourceEventDetailUrl. For example, the description of the sourceEventDetailUrl.

" + } + }, + "notificationType": { + "target": "com.amazonaws.notifications#NotificationType", + "traits": { + "smithy.api#documentation": "

The nature of the event causing this notification.

Values:
  • ALERT: A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached.
  • WARNING: A notification about an event where an issue is about to arise. For example, something is approaching a threshold.
  • ANNOUNCEMENT: A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated.
  • INFORMATIONAL: A notification about informational messages. For example, recommendations, service announcements, or reminders.
", + "smithy.api#required": {} + } + }, + "eventStatus": { + "target": "com.amazonaws.notifications#EventStatus", + "traits": { + "smithy.api#documentation": "

The status of an event.

Values:
  • HEALTHY: All EventRules are ACTIVE and any call can be run.
  • UNHEALTHY: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.
" + } + }, + "aggregationEventType": { + "target": "com.amazonaws.notifications#AggregationEventType", + "traits": { + "smithy.api#documentation": "

The notifications aggregation type.

" + } + }, + "aggregationSummary": { + "target": "com.amazonaws.notifications#AggregationSummary" + }, + "startTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start time of the notification event.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "endTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end time of the notification event.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "textParts": { + "target": "com.amazonaws.notifications#TextParts", + "traits": { + "smithy.api#documentation": "

A list of text values.

", + "smithy.api#required": {} + } + }, + "organizationalUnitId": { + "target": "com.amazonaws.notifications#OrganizationalUnitId", + "traits": { + "smithy.api#documentation": "

The Organizational Unit Id that an Amazon Web Services account belongs to.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A notification-focused representation of an event. It contains semantic information used by AccountContacts or Additional Channels to create end-user notifications.
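A single event of this shape is fetched by ARN through GetManagedNotificationEvent (the resource definition follows); a minimal sketch:

```swift
// Sketch: retrieve one managed notification event (ARN value hypothetical).
let response = try await notifications.getManagedNotificationEvent(arn: managedEventArn)
print(response)  // wraps the ManagedNotificationEvent structure described above
```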

" + } + }, + "com.amazonaws.notifications#ManagedNotificationEventArn": { + "type": "string", + "traits": { + "aws.api#arnReference": { + "service": "com.amazonaws.notifications#Notifications", + "resource": "com.amazonaws.notifications#ManagedNotificationEvent" + }, + "smithy.api#pattern": "^arn:[-.a-z0-9]{1,63}:notifications::[0-9]{12}:managed-notification-configuration/category/[a-zA-Z0-9\\-]{3,64}/sub-category/[a-zA-Z0-9\\-]{3,64}/event/[a-z0-9]{27}$" + } + }, + "com.amazonaws.notifications#ManagedNotificationEventOverview": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.notifications#ManagedNotificationEventArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationEvent.

", + "smithy.api#required": {} + } + }, + "managedNotificationConfigurationArn": { + "target": "com.amazonaws.notifications#ManagedNotificationConfigurationOsArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ManagedNotificationConfiguration.

", + "smithy.api#required": {} + } + }, + "relatedAccount": { + "target": "com.amazonaws.notifications#AccountId", + "traits": { + "smithy.api#documentation": "

The account related to the ManagedNotificationEvent.

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "com.amazonaws.notifications#CreationTime", + "traits": { + "smithy.api#documentation": "

The creation time of the ManagedNotificationEvent.

", + "smithy.api#required": {} + } + }, + "notificationEvent": { + "target": "com.amazonaws.notifications#ManagedNotificationEventSummary", + "traits": { + "smithy.api#required": {} + } + }, + "aggregationEventType": { + "target": "com.amazonaws.notifications#AggregationEventType", + "traits": { + "smithy.api#documentation": "

The notifications aggregation type.

Values:
  • AGGREGATE: The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
  • CHILD: The notification event is a child of an aggregate notification.
  • NONE: The notification isn't aggregated.
" + } + }, + "organizationalUnitId": { + "target": "com.amazonaws.notifications#OrganizationalUnitId", + "traits": { + "smithy.api#documentation": "

The Organizational Unit Id that an Amazon Web Services account belongs to.

" + } + }, + "aggregationSummary": { + "target": "com.amazonaws.notifications#AggregationSummary" + }, + "aggregatedNotificationRegions": { + "target": "com.amazonaws.notifications#AggregatedNotificationRegions", + "traits": { + "smithy.api#documentation": "

The list of the Regions where the aggregated notifications in this NotificationEvent originated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes an overview and metadata for a ManagedNotificationEvent.

" + } + }, + "com.amazonaws.notifications#ManagedNotificationEventResource": { + "type": "resource", + "identifiers": { + "arn": { + "target": "com.amazonaws.notifications#ManagedNotificationEventArn" + } + }, + "read": { + "target": "com.amazonaws.notifications#GetManagedNotificationEvent" + }, + "list": { + "target": "com.amazonaws.notifications#ListManagedNotificationEvents" }, "traits": { - "smithy.api#input": {} + "aws.api#arn": { + "template": "{arn}", + "absolute": true + } } }, - "com.amazonaws.notifications#ListTagsForResourceResponse": { + "com.amazonaws.notifications#ManagedNotificationEventSummary": { "type": "structure", "members": { - "tags": { - "target": "com.amazonaws.notifications#TagMap", + "schemaVersion": { + "target": "com.amazonaws.notifications#SchemaVersion", "traits": { - "smithy.api#documentation": "

A list of tags for the specified ARN.

" + "smithy.api#documentation": "

The schema version of the ManagedNotificationEvent.

", + "smithy.api#required": {} + } + }, + "sourceEventMetadata": { + "target": "com.amazonaws.notifications#ManagedSourceEventMetadataSummary", + "traits": { + "smithy.api#documentation": "

Contains metadata about the event that caused the ManagedNotificationEvent.

", + "smithy.api#required": {} + } + }, + "messageComponents": { + "target": "com.amazonaws.notifications#MessageComponentsSummary", + "traits": { + "smithy.api#required": {} + } + }, + "eventStatus": { + "target": "com.amazonaws.notifications#EventStatus", + "traits": { + "smithy.api#documentation": "

The managed notification event status.

Values:
  • HEALTHY: All EventRules are ACTIVE.
  • UNHEALTHY: Some EventRules are ACTIVE and some are INACTIVE.
", + "smithy.api#required": {} + } + }, + "notificationType": { + "target": "com.amazonaws.notifications#NotificationType", + "traits": { + "smithy.api#documentation": "

The type of event causing the notification.

Values:
  • ALERT: A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached.
  • WARNING: A notification about an event where an issue is about to arise. For example, something is approaching a threshold.
  • ANNOUNCEMENT: A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated.
  • INFORMATIONAL: A notification about informational messages. For example, recommendations, service announcements, or reminders.
", + "smithy.api#required": {} } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

A short summary of a ManagedNotificationEvent. This is only used when listing managed notification events.

" } }, - "com.amazonaws.notifications#LocaleCode": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "name": "de_DE", - "value": "de_DE", - "documentation": "German (Germany)" - }, - { - "name": "en_CA", - "value": "en_CA", - "documentation": "English (Canada)" - }, - { - "name": "en_US", - "value": "en_US", - "documentation": "English (United States). This is the default locale." - }, - { - "name": "en_UK", - "value": "en_UK", - "documentation": "English (United Kingdom)" - }, - { - "name": "es_ES", - "value": "es_ES", - "documentation": "Spanish (Spain)" - }, - { - "name": "fr_CA", - "value": "fr_CA", - "documentation": "French (Canada)" - }, - { - "name": "fr_FR", - "value": "fr_FR", - "documentation": "French (France)" - }, - { - "name": "id_ID", - "value": "id_ID", - "documentation": "Bahasa Indonesian (Indonesia)" - }, - { - "name": "it_IT", - "value": "it_IT", - "documentation": "Italian (Italy)" - }, - { - "name": "ja_JP", - "value": "ja_JP", - "documentation": "Japanese (Japan)" - }, - { - "name": "ko_KR", - "value": "ko_KR", - "documentation": "Korean (Korea)" - }, - { - "name": "pt_BR", - "value": "pt_BR", - "documentation": "Portuguese (Brazil)" - }, - { - "name": "tr_TR", - "value": "tr_TR", - "documentation": "Turkish (Turkey)" - }, - { - "name": "zh_CN", - "value": "zh_CN", - "documentation": "Chinese (China)" - }, - { - "name": "zh_TW", - "value": "zh_TW", - "documentation": "Chinese (Taiwan)" - } - ], - "smithy.api#suppress": [ - "EnumTrait" - ] + "com.amazonaws.notifications#ManagedNotificationEvents": { + "type": "list", + "member": { + "target": "com.amazonaws.notifications#ManagedNotificationEventOverview" } }, "com.amazonaws.notifications#ManagedRuleArn": { @@ -2044,6 +4106,41 @@ "target": "com.amazonaws.notifications#ManagedRuleArn" } }, + "com.amazonaws.notifications#ManagedSourceEventMetadataSummary": { + "type": "structure", + "members": { + "eventOriginRegion": { + "target": "com.amazonaws.notifications#Region", + "traits": { + "smithy.api#documentation": "

The Region where the notification originated.

", + "smithy.api#length": { + "max": 32 + } + } + }, + "source": { + "target": "com.amazonaws.notifications#Source", + "traits": { + "smithy.api#documentation": "

The source service of the notification.


Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.

", + "smithy.api#required": {} + } + }, + "eventType": { + "target": "com.amazonaws.notifications#EventType", + "traits": { + "smithy.api#documentation": "

The event type of the notification.

", + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A short summary and metadata for a managed notification event.

" + } + }, "com.amazonaws.notifications#Media": { "type": "list", "member": { @@ -2070,7 +4167,7 @@ "url": { "target": "com.amazonaws.notifications#Url", "traits": { - "smithy.api#documentation": "

The url of the media.

", + "smithy.api#documentation": "

The URL of the media.

", "smithy.api#required": {} } }, @@ -2118,7 +4215,7 @@ "paragraphSummary": { "target": "com.amazonaws.notifications#TextPartReference", "traits": { - "smithy.api#documentation": "

A paragraph long or multiple sentence summary. For example, AWS Chatbot notifications.

" + "smithy.api#documentation": "

A paragraph long or multiple sentence summary. For example, Chatbot notifications.

" } }, "completeDescription": { @@ -2260,42 +4357,42 @@ "arn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the NotificationConfiguration resource.

", "smithy.api#required": {} } }, "name": { "target": "com.amazonaws.notifications#NotificationConfigurationName", "traits": { - "smithy.api#documentation": "

The name of the NotificationConfiguration. Supports RFC 3986's unreserved characters.

", + "smithy.api#documentation": "

The name of the NotificationConfiguration. Supports RFC 3986's unreserved characters.

", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.notifications#NotificationConfigurationDescription", "traits": { - "smithy.api#documentation": "

The description of the NotificationConfiguration.

", + "smithy.api#documentation": "

The description of the NotificationConfiguration.

", "smithy.api#required": {} } }, "status": { "target": "com.amazonaws.notifications#NotificationConfigurationStatus", "traits": { - "smithy.api#documentation": "

The status of this NotificationConfiguration.

The status should always be INACTIVE when part of the CreateNotificationConfiguration response.

Values:
  • ACTIVE: All EventRules are ACTIVE and any call can be run.
  • PARTIALLY_ACTIVE: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.
  • INACTIVE: All EventRules are INACTIVE and any call can be run.
  • DELETING: This NotificationConfiguration is being deleted. Only GET and LIST calls can be run.
", + "smithy.api#documentation": "

The current status of the NotificationConfiguration.

", "smithy.api#required": {} } }, "creationTime": { "target": "com.amazonaws.notifications#CreationTime", "traits": { - "smithy.api#documentation": "

The creation time of the resource.

", + "smithy.api#documentation": "

The creation time of the NotificationConfiguration.

", "smithy.api#required": {} } }, "aggregationDuration": { "target": "com.amazonaws.notifications#AggregationDuration", "traits": { - "smithy.api#documentation": "

The aggregation preference of the NotificationConfiguration.

Values:
  • LONG: Aggregate notifications for long periods of time (12 hours).
  • SHORT: Aggregate notifications for short periods of time (5 minutes).
  • NONE: Don't aggregate notifications. No delay in delivery.
" + "smithy.api#documentation": "

The aggregation preference of the NotificationConfiguration.

Values:
  • LONG: Aggregate notifications for long periods of time (12 hours).
  • SHORT: Aggregate notifications for short periods of time (5 minutes).
  • NONE: Don't aggregate notifications.
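The duration is a latency-versus-noise trade-off: LONG batches related events for up to 12 hours, SHORT for about 5 minutes, NONE delivers immediately. A hypothetical creation call choosing SHORT; the CreateNotificationConfiguration operation is not shown in this hunk, so its signature is assumed.

```swift
// Sketch: create a lightly aggregated configuration.
let created = try await notifications.createNotificationConfiguration(
    aggregationDuration: .short,  // enum case name assumed for SHORT
    description: "EC2 state changes, batched for ~5 minutes",
    name: "ec2-state-changes"
)
print(created.arn, created.status)  // status starts as INACTIVE, per the note above
```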
" } } }, @@ -2338,46 +4435,52 @@ "notificationConfigurationArn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The ARN of the NotificationConfiguration.

", + "smithy.api#documentation": "

The ARN of the NotificationConfiguration.

", "smithy.api#required": {} } }, "relatedAccount": { "target": "com.amazonaws.notifications#AccountId", "traits": { - "smithy.api#documentation": "

The account name containing the NotificationHub.

", + "smithy.api#documentation": "

The account name containing the NotificationHub.

", "smithy.api#required": {} } }, "creationTime": { "target": "com.amazonaws.notifications#CreationTime", "traits": { - "smithy.api#documentation": "

The creation time of the NotificationEvent.

", + "smithy.api#documentation": "

The creation time of the NotificationEvent.

", "smithy.api#required": {} } }, "notificationEvent": { "target": "com.amazonaws.notifications#NotificationEventSummary", "traits": { - "smithy.api#documentation": "

Refers to a NotificationEventSummary object.


Similar in structure to content in the GetNotificationEvent response.

", + "smithy.api#documentation": "

Refers to a NotificationEventSummary object.


Similar in structure to content in the GetNotificationEvent response.

", "smithy.api#required": {} } }, "aggregationEventType": { "target": "com.amazonaws.notifications#AggregationEventType", "traits": { - "smithy.api#documentation": "

The NotificationConfiguration's aggregation type.

Values:
  • AGGREGATE: The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
  • CHILD: The notification event is a child of an aggregate notification.
  • NONE: The notification isn't aggregated.
" + "smithy.api#documentation": "

The NotificationConfiguration's aggregation type.

Values:
  • AGGREGATE: The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
  • CHILD: The notification event is a child of an aggregate notification.
  • NONE: The notification isn't aggregated.
" } }, "aggregateNotificationEventArn": { "target": "com.amazonaws.notifications#NotificationEventArn", "traits": { - "smithy.api#documentation": "

The ARN of the aggregate notification event to match.

" + "smithy.api#documentation": "

The ARN of the aggregate notification event to match.

" + } + }, + "aggregationSummary": { + "target": "com.amazonaws.notifications#AggregationSummary", + "traits": { + "smithy.api#documentation": "

Provides aggregated summary data for notification events.

" } } }, "traits": { - "smithy.api#documentation": "

Describes a short summary of a NotificationEvent. This is only used when listing notification events.

" + "smithy.api#documentation": "

Describes a short summary of a NotificationEvent. This is only used when listing notification events.

" } }, "com.amazonaws.notifications#NotificationEventResource": { @@ -2413,7 +4516,7 @@ "id": { "target": "com.amazonaws.notifications#NotificationEventId", "traits": { - "smithy.api#documentation": "

The unique identifier for a NotificationEvent.

", + "smithy.api#documentation": "

The unique identifier for a NotificationEvent.

", "smithy.api#required": {} } }, @@ -2452,19 +4555,25 @@ "eventStatus": { "target": "com.amazonaws.notifications#EventStatus", "traits": { - "smithy.api#documentation": "

The assesed nature of the event.

Values:
  • HEALTHY: All EventRules are ACTIVE and any call can be run.
  • UNHEALTHY: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.
" + "smithy.api#documentation": "

The assessed nature of the event.

Values:
  • HEALTHY: All EventRules are ACTIVE and any call can be run.
  • UNHEALTHY: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.
" } }, "aggregationEventType": { "target": "com.amazonaws.notifications#AggregationEventType", "traits": { - "smithy.api#documentation": "

The NotificationConfiguration's aggregation type.

Values:
  • AGGREGATE: The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
  • CHILD: The notification event is a child of an aggregate notification.
  • NONE: The notification isn't aggregated.
" + "smithy.api#documentation": "

The aggregation type of the NotificationConfiguration.

Values:
  • AGGREGATE: The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
  • CHILD: The notification event is a child of an aggregate notification.
  • NONE: The notification isn't aggregated.
" } }, "aggregateNotificationEventArn": { "target": "com.amazonaws.notifications#NotificationEventArn", "traits": { - "smithy.api#documentation": "

If the value of aggregationEventType is not NONE, this is the Amazon Resource Name (ARN) of the parent aggregate notification.

This is omitted if the notification isn't aggregated.

" + "smithy.api#documentation": "

If the value of aggregationEventType is not NONE, this is the Amazon Resource Name (ARN) of the parent aggregate notification.

This is omitted if the notification isn't aggregated.
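Tying the aggregation fields together: when a listed event reports aggregationEventType AGGREGATE, the events grouped under it can be pulled by feeding its ARN back into the aggregateNotificationEventArn filter of ListNotificationEvents. A sketch, with the enum case name assumed:

```swift
// Sketch: expand an aggregate notification into the events grouped under it.
if event.aggregationEventType == .aggregate {
    let children = try await notifications.listNotificationEvents(
        aggregateNotificationEventArn: aggregateArn  // the aggregate event's ARN
    )
    print("child events:", children.notificationEvents.count)
}
```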

" + } + }, + "aggregationSummary": { + "target": "com.amazonaws.notifications#AggregationSummary", + "traits": { + "smithy.api#documentation": "

Provides additional information about how multiple notifications are grouped.

" } }, "startTime": { @@ -2497,7 +4606,7 @@ } }, "traits": { - "smithy.api#documentation": "

A NotificationEvent is a notification-focused representation of an event. It contains semantic information used by Channels to create end-user notifications.

" + "smithy.api#documentation": "

A NotificationEvent is a notification-focused representation of an event. It contains semantic information used by Channels to create end-user notifications.

" } }, "com.amazonaws.notifications#NotificationEventSummary": { @@ -2527,7 +4636,7 @@ "eventStatus": { "target": "com.amazonaws.notifications#EventStatus", "traits": { - "smithy.api#documentation": "

The notification event status.

Values:
  • HEALTHY: All EventRules are ACTIVE and any call can be run.
  • UNHEALTHY: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.
", + "smithy.api#documentation": "

Provides additional information about the current status of the NotificationEvent.

Values:
  • HEALTHY: All EventRules are ACTIVE.
  • UNHEALTHY: Some EventRules are ACTIVE and some are INACTIVE.
", "smithy.api#required": {} } }, @@ -2540,7 +4649,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes a short summary and metadata for a notification event.

" + "smithy.api#documentation": "

Describes a short summary and metadata for a NotificationEvent.

" } }, "com.amazonaws.notifications#NotificationEvents": { @@ -2591,19 +4700,19 @@ "creationTime": { "target": "com.amazonaws.notifications#CreationTime", "traits": { - "smithy.api#documentation": "

The date and time the resource was created.

", + "smithy.api#documentation": "

The date and time the NotificationHubOverview was created.

", "smithy.api#required": {} } }, "lastActivationTime": { "target": "com.amazonaws.notifications#LastActivationTime", "traits": { - "smithy.api#documentation": "

The most recent time this NotificationHub had an ACTIVE status.

" + "smithy.api#documentation": "

The most recent time this NotificationHub had an ACTIVE status.

" } } }, "traits": { - "smithy.api#documentation": "

Describes an overview of a NotificationHub.


A NotificationHub is an account-level setting used to select the Regions where you want to store, process and replicate your notifications.

" + "smithy.api#documentation": "

Describes an overview of a NotificationHub.


A NotificationConfiguration is an account-level setting used to select the Regions where you want to store, process and replicate your notifications.

" } }, "com.amazonaws.notifications#NotificationHubStatus": { @@ -2642,20 +4751,20 @@ "status": { "target": "com.amazonaws.notifications#NotificationHubStatus", "traits": { - "smithy.api#documentation": "

Status information about the NotificationHub.

Values:
  • ACTIVE: Incoming NotificationEvents are replicated to this NotificationHub.
  • REGISTERING: The NotificationHub is initializing. A NotificationHub with this status can't be deregistered.
  • DEREGISTERING: The NotificationHub is being deleted. You can't register additional NotificationHubs in the same Region as a NotificationHub with this status.
", + "smithy.api#documentation": "

Status information about the NotificationHub.

Values:
  • ACTIVE: Incoming NotificationEvents are replicated to this NotificationHub.
  • REGISTERING: The NotificationConfiguration is initializing. A NotificationConfiguration with this status can't be deregistered.
  • DEREGISTERING: The NotificationConfiguration is being deleted. You can't register additional NotificationHubs in the same Region as a NotificationConfiguration with this status.
", "smithy.api#required": {} } }, "reason": { "target": "com.amazonaws.notifications#NotificationHubStatusReason", "traits": { - "smithy.api#documentation": "

An Explanation for the current status.

", + "smithy.api#documentation": "

An explanation for the current status.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

NotificationHub status information.

" + "smithy.api#documentation": "

Provides additional information about the current NotificationHub status.

" } }, "com.amazonaws.notifications#NotificationHubs": { @@ -2691,6 +4800,9 @@ "type": "service", "version": "2018-05-10", "operations": [ + { + "target": "com.amazonaws.notifications#ListManagedNotificationChannelAssociations" + }, { "target": "com.amazonaws.notifications#ListTagsForResource" }, @@ -2708,6 +4820,21 @@ { "target": "com.amazonaws.notifications#EventRule" }, + { + "target": "com.amazonaws.notifications#ManagedNotificationAccountContactAssociation" + }, + { + "target": "com.amazonaws.notifications#ManagedNotificationAdditionalChannelAssociation" + }, + { + "target": "com.amazonaws.notifications#ManagedNotificationChildEventResource" + }, + { + "target": "com.amazonaws.notifications#ManagedNotificationConfiguration" + }, + { + "target": "com.amazonaws.notifications#ManagedNotificationEventResource" + }, { "target": "com.amazonaws.notifications#NotificationConfiguration" }, @@ -2716,6 +4843,9 @@ }, { "target": "com.amazonaws.notifications#NotificationHub" + }, + { + "target": "com.amazonaws.notifications#OrganizationAccess" } ], "traits": { @@ -2753,7 +4883,7 @@ ], "maxAge": 86400 }, - "smithy.api#documentation": "

The AWS User Notifications API Reference provides descriptions, API request parameters, and the JSON response for each of the User Notification API actions.

User Notification control APIs are currently available in US East (Virginia) - us-east-1.

GetNotificationEvent and ListNotificationEvents APIs are currently available in commercial partition Regions and only return notifications stored in the same Region in which they're called.

The User Notifications console can only be used in US East (Virginia). Your data, however, is stored in each Region chosen as a notification hub in addition to US East (Virginia).

", + "smithy.api#documentation": "

The Amazon Web Services User Notifications API Reference provides descriptions, API request parameters, and the JSON response for each of the User Notification API actions.

User Notification control plane APIs are currently available in US East (Virginia) - us-east-1.

GetNotificationEvent and ListNotificationEvents APIs are currently available in commercial partition Regions and only return notifications stored in the same Region in which they're called.

The User Notifications console can only be used in US East (Virginia). Your data, however, is stored in each Region chosen as a notification hub in addition to US East (Virginia).
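Given these Region constraints, a Soto client for the control plane has to target us-east-1, while event reads go to whichever Region holds the data. A minimal setup sketch; construction details vary by Soto version, and the module name is assumed.

```swift
import SotoNotifications  // module name assumed

// Control plane calls must go to US East (N. Virginia).
let client = AWSClient()  // credential/HTTP configuration elided
let controlPlane = Notifications(client: client, region: .useast1)

// Event reads return only notifications stored in the Region they're called in.
let euEvents = Notifications(client: client, region: .euwest1)
```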

", "smithy.api#title": "AWS User Notifications", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -3095,6 +5225,39 @@ } } }, + "com.amazonaws.notifications#NotificationsAccessForOrganization": { + "type": "structure", + "members": { + "accessStatus": { + "target": "com.amazonaws.notifications#AccessStatus", + "traits": { + "smithy.api#documentation": "

Access Status for the Orgs Service.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Orgs Service trust for User Notifications.
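The OrganizationAccess resource defined just below wires this structure to its enable, get, and disable operations; checking the current status might look like this sketch (response member names assumed).

```swift
// Sketch: read the organization's access status for User Notifications.
let access = try await notifications.getNotificationsAccessForOrganization()
print(access)  // carries a NotificationsAccessForOrganization with its accessStatus
```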

" + } + }, + "com.amazonaws.notifications#OrganizationAccess": { + "type": "resource", + "put": { + "target": "com.amazonaws.notifications#EnableNotificationsAccessForOrganization" + }, + "read": { + "target": "com.amazonaws.notifications#GetNotificationsAccessForOrganization" + }, + "delete": { + "target": "com.amazonaws.notifications#DisableNotificationsAccessForOrganization" + } + }, + "com.amazonaws.notifications#OrganizationalUnitId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^Root|ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$" + } + }, "com.amazonaws.notifications#QuotaCode": { "type": "string" }, @@ -3152,7 +5315,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to register a NotificationHub" }, - "smithy.api#documentation": "

Registers a NotificationHub in the specified Region.

\n

There is a maximum of one NotificationHub per Region. You can have a maximum of 3 NotificationHubs at a time.

", + "smithy.api#documentation": "

Registers a NotificationConfiguration in the specified Region.

\n

There is a maximum of one NotificationConfiguration per Region. You can have a\n maximum of 3 NotificationHub resources at a time.

", "smithy.api#http": { "code": 201, "method": "POST", @@ -3167,7 +5330,7 @@ "notificationHubRegion": { "target": "com.amazonaws.notifications#Region", "traits": { - "smithy.api#documentation": "

The Region of the NotificationHub.

", + "smithy.api#documentation": "

The Region of the NotificationHub.

", "smithy.api#required": {} } } @@ -3182,14 +5345,14 @@ "notificationHubRegion": { "target": "com.amazonaws.notifications#Region", "traits": { - "smithy.api#documentation": "

The Region of the NotificationHub.

", + "smithy.api#documentation": "

The Region of the NotificationHub.

", "smithy.api#required": {} } }, "statusSummary": { "target": "com.amazonaws.notifications#NotificationHubStatusSummary", "traits": { - "smithy.api#documentation": "

NotificationHub status information.

", + "smithy.api#documentation": "

Provides additional information about the current NotificationConfiguration\n status.

", "smithy.api#required": {} } }, @@ -3278,6 +5441,23 @@ "target": "com.amazonaws.notifications#Resource" } }, + "com.amazonaws.notifications#SampleAggregationDimensionValues": { + "type": "list", + "member": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "traits": { + "smithy.api#length": { + "max": 50 + } + } + }, "com.amazonaws.notifications#SchemaVersion": { "type": "string", "traits": { @@ -3377,7 +5557,7 @@ "relatedAccount": { "target": "com.amazonaws.notifications#AccountId", "traits": { - "smithy.api#documentation": "

The Primary AWS account of Source Event

", + "smithy.api#documentation": "

The primary Amazon Web Services account of SourceEvent.

", "smithy.api#pattern": "^[0-9]{12}$", "smithy.api#required": {} } @@ -3385,7 +5565,7 @@ "source": { "target": "com.amazonaws.notifications#Source", "traits": { - "smithy.api#documentation": "

The AWS servvice the event originates from. For example aws.cloudwatch.

", + "smithy.api#documentation": "

The Amazon Web Services service the event originates from. For example aws.cloudwatch.

", "smithy.api#required": {} } }, @@ -3400,7 +5580,7 @@ "eventType": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The type of event. For example, an AWS CloudWatch state change.

", + "smithy.api#documentation": "

The type of event. For example, an Amazon CloudWatch state change.

", "smithy.api#length": { "min": 1, "max": 256 @@ -3411,7 +5591,7 @@ "relatedResources": { "target": "com.amazonaws.notifications#Resources", "traits": { - "smithy.api#documentation": "

A list of resources related to this NotificationEvent.

", + "smithy.api#documentation": "

A list of resources related to this NotificationEvent.

", "smithy.api#required": {} } } @@ -3435,14 +5615,14 @@ "source": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The matched event source.

\n

Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

The matched event source.

\n

Must match one of the valid EventBridge sources. Only Amazon Web Services service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.

", "smithy.api#required": {} } }, "eventType": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The event type to match.

\n

Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

The event type to match.

\n

Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and Amazon CloudWatch Alarm State Change. For more information, see Event delivery from Amazon Web Services services in the Amazon EventBridge User Guide.

", "smithy.api#length": { "min": 1, "max": 256 @@ -3452,7 +5632,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains metadata about the event that caused the NotificationEvent. For other specific values, see sourceEventMetadata.

" + "smithy.api#documentation": "

Contains metadata about the event that caused the NotificationEvent. For\n other specific values, see sourceEventMetadata.

" } }, "com.amazonaws.notifications#StatusSummaryByRegion": { @@ -3464,6 +5644,68 @@ "target": "com.amazonaws.notifications#EventRuleStatusSummary" } }, + "com.amazonaws.notifications#SummarizationDimensionDetail": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the SummarizationDimensionDetail.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Value of the property used to summarize aggregated events.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides detailed information about the dimensions used for event summarization and aggregation.

" + } + }, + "com.amazonaws.notifications#SummarizationDimensionDetails": { + "type": "list", + "member": { + "target": "com.amazonaws.notifications#SummarizationDimensionDetail" + } + }, + "com.amazonaws.notifications#SummarizationDimensionOverview": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Name of the summarization dimension.

", + "smithy.api#required": {} + } + }, + "count": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Total number of occurrences for this dimension.

", + "smithy.api#required": {} + } + }, + "sampleValues": { + "target": "com.amazonaws.notifications#SampleAggregationDimensionValues", + "traits": { + "smithy.api#documentation": "

Indicates the sample values found within the dimension.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides an overview of how data is summarized across different dimensions.

" + } + }, + "com.amazonaws.notifications#SummarizationDimensionOverviews": { + "type": "list", + "member": { + "target": "com.amazonaws.notifications#SummarizationDimensionOverview" + } + }, "com.amazonaws.notifications#TagKey": { "type": "string", "traits": { @@ -3527,7 +5769,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to tag a resource" }, - "smithy.api#documentation": "

Tags the resource with a tag key and value.

\n

For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide.

\n \n

This is only supported for NotificationConfigurations.

\n
", + "smithy.api#documentation": "

Tags the resource with a tag key and value.

\n

For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide.

\n \n

This is only supported for NotificationConfigurations.

\n
", "smithy.api#http": { "method": "POST", "uri": "/tags/{arn}" @@ -3645,7 +5887,7 @@ "displayText": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

A short single line description of the link. Must be hyperlinked with the URL itself.

\n

Used for text parts with the type URL.

", + "smithy.api#documentation": "

A short single line description of the link. Must be hyper-linked with the URL itself.

\n

Used for text parts with the type URL.

", "smithy.api#length": { "min": 1, "max": 1024 @@ -3746,7 +5988,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to remove tags from a resource" }, - "smithy.api#documentation": "

Untags a resource with a specified Amazon Resource Name (ARN).

\n

For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide.

", + "smithy.api#documentation": "

Untags a resource with a specified Amazon Resource Name (ARN).

\n

For more information, see Tagging your Amazon Web Services resources in the Tagging Amazon Web Services Resources User Guide.

", "smithy.api#http": { "method": "DELETE", "uri": "/tags/{arn}" @@ -3818,7 +6060,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to update an EventRule" }, - "smithy.api#documentation": "

Updates an existing EventRule.

", + "smithy.api#documentation": "

Updates an existing EventRule.

", "smithy.api#http": { "code": 200, "method": "PUT", @@ -3833,7 +6075,7 @@ "arn": { "target": "com.amazonaws.notifications#EventRuleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) to use to update the EventRule.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) to use to update the EventRule.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -3841,13 +6083,13 @@ "eventPattern": { "target": "com.amazonaws.notifications#EventRuleEventPattern", "traits": { - "smithy.api#documentation": "

An additional event pattern used to further filter the events this EventRule receives.

\n

For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.\n

" + "smithy.api#documentation": "

An additional event pattern used to further filter the events this EventRule receives.

\n

For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.\n

" } }, "regions": { "target": "com.amazonaws.notifications#Regions", "traits": { - "smithy.api#documentation": "

A list of AWS Regions that sends events to this EventRule.

" + "smithy.api#documentation": "

A list of Amazon Web Services Regions that send events to this EventRule.

" } } }, @@ -3861,14 +6103,14 @@ "arn": { "target": "com.amazonaws.notifications#EventRuleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) to use to update the EventRule.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) to use to update the EventRule.

", "smithy.api#required": {} } }, "notificationConfigurationArn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The ARN of the NotificationConfiguration.

", + "smithy.api#documentation": "

The ARN of the NotificationConfiguration.

", "smithy.api#required": {} } }, @@ -3917,7 +6159,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to update a NotificationConfiguration" }, - "smithy.api#documentation": "

Updates a NotificationConfiguration.

", + "smithy.api#documentation": "

Updates a NotificationConfiguration.

", "smithy.api#http": { "code": 200, "method": "PUT", @@ -3932,7 +6174,7 @@ "arn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) used to update the NotificationConfiguration.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) used to update the NotificationConfiguration.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -3940,19 +6182,19 @@ "name": { "target": "com.amazonaws.notifications#NotificationConfigurationName", "traits": { - "smithy.api#documentation": "

The name of the NotificationConfiguration.

" + "smithy.api#documentation": "

The name of the NotificationConfiguration.

" } }, "description": { "target": "com.amazonaws.notifications#NotificationConfigurationDescription", "traits": { - "smithy.api#documentation": "

The description of the NotificationConfiguration.

" + "smithy.api#documentation": "

The description of the NotificationConfiguration.

" } }, "aggregationDuration": { "target": "com.amazonaws.notifications#AggregationDuration", "traits": { - "smithy.api#documentation": "

The status of this NotificationConfiguration.

\n

The status should always be INACTIVE when part of the CreateNotificationConfiguration response.

\n
    \n
  • \n

    Values:

    \n
      \n
    • \n

      \n ACTIVE\n

      \n
        \n
      • \n

        All EventRules are ACTIVE and any call can be run.

        \n
      • \n
      \n
    • \n
    • \n

      \n PARTIALLY_ACTIVE\n

      \n
        \n
      • \n

        Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.

        \n
      • \n
      • \n

        Any call can be run.

        \n
      • \n
      \n
    • \n
    • \n

      \n INACTIVE\n

      \n
        \n
      • \n

        All EventRules are INACTIVE and any call can be run.

        \n
      • \n
      \n
    • \n
    • \n

      \n DELETING\n

      \n
        \n
      • \n

        This NotificationConfiguration is being deleted.

        \n
      • \n
      • \n

        Only GET and LIST calls can be run.

        \n
      • \n
      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

The aggregation preference of the NotificationConfiguration.

\n
    \n
  • \n

    Values:

    \n
      \n
    • \n

      \n LONG\n

      \n
        \n
      • \n

        Aggregate notifications for long periods of time (12 hours).

        \n
      • \n
      \n
    • \n
    • \n

      \n SHORT\n

      \n
        \n
      • \n

        Aggregate notifications for short periods of time (5 minutes).

        \n
      • \n
      \n
    • \n
    • \n

      \n NONE\n

      \n
        \n
      • \n

        Don't aggregate notifications.

        \n
      • \n
      \n
    • \n
    \n
  • \n
" } } }, @@ -3966,7 +6208,7 @@ "arn": { "target": "com.amazonaws.notifications#NotificationConfigurationArn", "traits": { - "smithy.api#documentation": "

The ARN used to update the NotificationConfiguration.

", + "smithy.api#documentation": "

The ARN used to update the NotificationConfiguration.

", "smithy.api#required": {} } } diff --git a/models/organizations.json b/models/organizations.json index 8c22f0db6e..d73d982a44 100644 --- a/models/organizations.json +++ b/models/organizations.json @@ -2183,7 +2183,7 @@ } }, "traits": { - "smithy.api#documentation": "

Performing this operation violates a minimum or maximum value limit. For example,\n attempting to remove the last service control policy (SCP) from an OU or root, inviting\n or creating too many accounts to the organization, or attaching too many policies to an\n account, OU, or root. This exception includes a reason that contains additional\n information about the violated limit:

\n \n

Some of the reasons in the following list might not be applicable to this specific\n API or operation.

\n
\n
    \n
  • \n

    ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management\n account from the organization. You can't remove the management account. Instead,\n after you remove all member accounts, delete the organization itself.

    \n
  • \n
  • \n

    ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an\n account from the organization that doesn't yet have enough information to exist\n as a standalone account. This account requires you to first complete phone\n verification. Follow the steps at Removing a member account from your organization in the\n Organizations User Guide.

    \n
  • \n
  • \n

    ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of\n accounts that you can create in one day.

    \n
  • \n
  • \n

    ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your\n account isn't fully active. You must complete the account setup before you\n create an organization.

    \n
  • \n
  • \n

    ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number\n of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to\n request an increase in your limit.

    \n

    Or the number of invitations that you tried to send would cause you to exceed\n the limit of accounts in your organization. Send fewer invitations or contact\n Amazon Web Services Support to request an increase in the number of accounts.

    \n \n

    Deleted and closed accounts still count toward your limit.

    \n
    \n \n

    If you get this exception when running a command immediately after\n creating the organization, wait one hour and try again. After an hour, if\n the command continues to fail with this error, contact Amazon Web Services Support.

    \n
    \n
  • \n
  • \n

    CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot\n register a suspended account as a delegated administrator.

    \n
  • \n
  • \n

    CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register\n the management account of the organization as a delegated administrator for an\n Amazon Web Services service integrated with Organizations. You can designate only a member account as a\n delegated administrator.

    \n
  • \n
  • \n

    CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management\n account. To close the management account for the organization, you must first\n either remove or close all member accounts in the organization. Follow standard\n account closure process using root credentials.​

    \n
  • \n
  • \n

    CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an\n account that is registered as a delegated administrator for a service integrated\n with your organization. To complete this operation, you must first deregister\n this account as a delegated administrator.

    \n
  • \n
  • \n

    CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the\n past 30 days.

    \n
  • \n
  • \n

    CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of\n accounts that you can close at a time. ​

    \n
  • \n
  • \n

    CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an\n organization in the specified region, you must enable all features mode.

    \n
  • \n
  • \n

    DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an\n Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has\n a delegated administrator. To complete this operation, you must first deregister\n any existing delegated administrators for this service.

    \n
  • \n
  • \n

    EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for\n a limited period of time. You must resubmit the request and generate a new\n verfication code.

    \n
  • \n
  • \n

    HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of\n handshakes that you can send in one day.

    \n
  • \n
  • \n

    INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported\n payment method is associated with the account. Amazon Web Services does not support cards\n issued by financial institutions in Russia or Belarus. For more information, see\n Managing your\n Amazon Web Services payments.

    \n
  • \n
  • \n

    MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in\n this organization, you first must migrate the organization's management account\n to the marketplace that corresponds to the management account's address. All\n accounts in an organization must be associated with the same marketplace.

    \n
  • \n
  • \n

    MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in\n China. To create an organization, the master must have a valid business license.\n For more information, contact customer support.

    \n
  • \n
  • \n

    MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must\n first provide a valid contact address and phone number for the management\n account. Then try the operation again.

    \n
  • \n
  • \n

    MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the\n management account must have an associated account in the Amazon Web Services GovCloud\n (US-West) Region. For more information, see Organizations\n in the \n Amazon Web Services GovCloud User Guide.

    \n
  • \n
  • \n

    MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with\n this management account, you first must associate a valid payment instrument,\n such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in\n the Organizations User Guide.

    \n
  • \n
  • \n

    MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to\n register more delegated administrators than allowed for the service principal.\n

    \n
  • \n
  • \n

    MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number\n of policies of a certain type that can be attached to an entity at one\n time.

    \n
  • \n
  • \n

    MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this\n resource.

    \n
  • \n
  • \n

    MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with\n this member account, you first must associate a valid payment instrument, such\n as a credit card, with the account. For more information, see Considerations before removing an account from an organization in\n the Organizations User Guide.

    \n
  • \n
  • \n

    MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy\n from an entity that would cause the entity to have fewer than the minimum number\n of policies of a certain type required.

    \n
  • \n
  • \n

    ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation\n that requires the organization to be configured to support all features. An\n organization that supports only consolidated billing features can't perform this\n operation.

    \n
  • \n
  • \n

    OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many\n levels deep.

    \n
  • \n
  • \n

    OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you\n can have in an organization.

    \n
  • \n
  • \n

    POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger\n than the maximum size.

    \n
  • \n
  • \n

    POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies\n that you can have in an organization.

    \n
  • \n
  • \n

    SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated\n administrator before you enabled service access. Call the\n EnableAWSServiceAccess API first.

    \n
  • \n
  • \n

    TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags\n that are not compliant with the tag policy requirements for this account.

    \n
  • \n
  • \n

    WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, you must wait until at least seven days after the account was created.\n Invited accounts aren't subject to this waiting period.

    \n
  • \n
", + "smithy.api#documentation": "

Performing this operation violates a minimum or maximum value limit. For example,\n attempting to remove the last service control policy (SCP) from an OU or root, inviting\n or creating too many accounts to the organization, or attaching too many policies to an\n account, OU, or root. This exception includes a reason that contains additional\n information about the violated limit:

\n \n

Some of the reasons in the following list might not be applicable to this specific\n API or operation.

\n
\n
    \n
  • \n

    ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management\n account from the organization. You can't remove the management account. Instead,\n after you remove all member accounts, delete the organization itself.

    \n
  • \n
  • \n

    ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an\n account from the organization that doesn't yet have enough information to exist\n as a standalone account. This account requires you to first complete phone\n verification. Follow the steps at Removing a member account from your organization in the\n Organizations User Guide.

    \n
  • \n
  • \n

    ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of\n accounts that you can create in one day.

    \n
  • \n
  • \n

    ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your\n account isn't fully active. You must complete the account setup before you\n create an organization.

    \n
  • \n
  • \n

    ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number\n of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to\n request an increase in your limit.

    \n

    Or the number of invitations that you tried to send would cause you to exceed\n the limit of accounts in your organization. Send fewer invitations or contact\n Amazon Web Services Support to request an increase in the number of accounts.

    \n \n

    Deleted and closed accounts still count toward your limit.

    \n
    \n \n

    If you get this exception when running a command immediately after\n creating the organization, wait one hour and try again. After an hour, if\n the command continues to fail with this error, contact Amazon Web Services Support.

    \n
    \n
  • \n
  • \n

ALL_FEATURES_MIGRATION_ORGANIZATION_SIZE_LIMIT_EXCEEDED:\n Your organization has more than 5000 accounts, and you can only use the standard migration process for organizations with fewer than 5000 accounts.\n Use the assisted migration process to enable all features mode, or create a support case for assistance if you are unable to use assisted migration.

    \n
  • \n
  • \n

    CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot\n register a suspended account as a delegated administrator.

    \n
  • \n
  • \n

    CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register\n the management account of the organization as a delegated administrator for an\n Amazon Web Services service integrated with Organizations. You can designate only a member account as a\n delegated administrator.

    \n
  • \n
  • \n

    CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management\n account. To close the management account for the organization, you must first\n either remove or close all member accounts in the organization. Follow standard\n account closure process using root credentials.​

    \n
  • \n
  • \n

    CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an\n account that is registered as a delegated administrator for a service integrated\n with your organization. To complete this operation, you must first deregister\n this account as a delegated administrator.

    \n
  • \n
  • \n

    CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the\n past 30 days.

    \n
  • \n
  • \n

    CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of\n accounts that you can close at a time. ​

    \n
  • \n
  • \n

    CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an\n organization in the specified region, you must enable all features mode.

    \n
  • \n
  • \n

    DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an\n Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has\n a delegated administrator. To complete this operation, you must first deregister\n any existing delegated administrators for this service.

    \n
  • \n
  • \n

EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for\n a limited period of time. You must resubmit the request and generate a new\n verification code.

    \n
  • \n
  • \n

    HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of\n handshakes that you can send in one day.

    \n
  • \n
  • \n

    INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported\n payment method is associated with the account. Amazon Web Services does not support cards\n issued by financial institutions in Russia or Belarus. For more information, see\n Managing your\n Amazon Web Services payments.

    \n
  • \n
  • \n

    MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in\n this organization, you first must migrate the organization's management account\n to the marketplace that corresponds to the management account's address. All\n accounts in an organization must be associated with the same marketplace.

    \n
  • \n
  • \n

    MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in\n China. To create an organization, the master must have a valid business license.\n For more information, contact customer support.

    \n
  • \n
  • \n

    MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must\n first provide a valid contact address and phone number for the management\n account. Then try the operation again.

    \n
  • \n
  • \n

    MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the\n management account must have an associated account in the Amazon Web Services GovCloud\n (US-West) Region. For more information, see Organizations\n in the \n Amazon Web Services GovCloud User Guide.

    \n
  • \n
  • \n

    MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with\n this management account, you first must associate a valid payment instrument,\n such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in\n the Organizations User Guide.

    \n
  • \n
  • \n

    MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to\n register more delegated administrators than allowed for the service principal.\n

    \n
  • \n
  • \n

    MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number\n of policies of a certain type that can be attached to an entity at one\n time.

    \n
  • \n
  • \n

    MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this\n resource.

    \n
  • \n
  • \n

    MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with\n this member account, you first must associate a valid payment instrument, such\n as a credit card, with the account. For more information, see Considerations before removing an account from an organization in\n the Organizations User Guide.

    \n
  • \n
  • \n

    MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy\n from an entity that would cause the entity to have fewer than the minimum number\n of policies of a certain type required.

    \n
  • \n
  • \n

    ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation\n that requires the organization to be configured to support all features. An\n organization that supports only consolidated billing features can't perform this\n operation.

    \n
  • \n
  • \n

    OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many\n levels deep.

    \n
  • \n
  • \n

    OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you\n can have in an organization.

    \n
  • \n
  • \n

    POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger\n than the maximum size.

    \n
  • \n
  • \n

    POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies\n that you can have in an organization.

    \n
  • \n
  • \n

    SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated\n administrator before you enabled service access. Call the\n EnableAWSServiceAccess API first.

    \n
  • \n
  • \n

    TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags\n that are not compliant with the tag policy requirements for this account.

    \n
  • \n
  • \n

    WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, you must wait until at least seven days after the account was created.\n Invited accounts aren't subject to this waiting period.

    \n
  • \n
", "smithy.api#error": "client", "smithy.api#httpError": 409 } @@ -2400,6 +2400,12 @@ "traits": { "smithy.api#enumValue": "CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR" } + }, + "ALL_FEATURES_MIGRATION_ORGANIZATION_SIZE_LIMIT_EXCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALL_FEATURES_MIGRATION_ORGANIZATION_SIZE_LIMIT_EXCEEDED" + } } } }, @@ -4727,6 +4733,9 @@ { "target": "com.amazonaws.organizations#ConcurrentModificationException" }, + { + "target": "com.amazonaws.organizations#ConstraintViolationException" + }, { "target": "com.amazonaws.organizations#HandshakeConstraintViolationException" }, diff --git a/models/outposts.json b/models/outposts.json index 93969fca11..c4d64247e0 100644 --- a/models/outposts.json +++ b/models/outposts.json @@ -5193,6 +5193,12 @@ "traits": { "smithy.api#enumValue": "AH532P6W" } + }, + "CS8365C": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CS8365C" + } } } }, @@ -5593,7 +5599,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts the specified capacity task. You can have one active capacity task per order or Outpost.

", + "smithy.api#documentation": "

Starts the specified capacity task. You can have one active capacity task for each order and each Outpost.

", "smithy.api#http": { "method": "POST", "uri": "/outposts/{OutpostIdentifier}/capacity", @@ -6409,7 +6415,7 @@ "PowerConnector": { "target": "com.amazonaws.outposts#PowerConnector", "traits": { - "smithy.api#documentation": "

The power connector that Amazon Web Services should plan to provide for connections to the hardware.\n Note the correlation between PowerPhase and PowerConnector.

\n
    \n
  • \n

    Single-phase AC feed

    \n
      \n
    • \n

      \n L6-30P – (common in US); 30A; single phase

      \n
    • \n
    • \n

      \n IEC309 (blue) – P+N+E, 6hr; 32 A; single\n phase

      \n
    • \n
    \n
  • \n
  • \n

    Three-phase AC feed

    \n
      \n
    • \n

      \n AH530P7W (red) – 3P+N+E, 7hr; 30A; three\n phase

      \n
    • \n
    • \n

      \n AH532P6W (red) – 3P+N+E, 6hr; 32A; three\n phase

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

The power connector that Amazon Web Services should plan to provide for connections to the hardware.\n Note the correlation between PowerPhase and PowerConnector.

\n
    \n
  • \n

    Single-phase AC feed

    \n
      \n
    • \n

      \n L6-30P – (common in US); 30A; single phase

      \n
    • \n
    • \n

      \n IEC309 (blue) – P+N+E, 6hr; 32 A; single\n phase

      \n
    • \n
    \n
  • \n
  • \n

    Three-phase AC feed

    \n
      \n
    • \n

      \n AH530P7W (red) – 3P+N+E, 7hr; 30A; three\n phase

      \n
    • \n
    • \n

      \n AH532P6W (red) – 3P+N+E, 6hr; 32A; three\n phase

      \n
    • \n
    • \n

      \n CS8365C – (common in US); 3P+E, 50A; three phase

      \n
    • \n
    \n
  • \n
" } }, "PowerFeedDrop": { diff --git a/models/partnercentral-selling.json b/models/partnercentral-selling.json index 02fdd1918c..e25abe9f00 100644 --- a/models/partnercentral-selling.json +++ b/models/partnercentral-selling.json @@ -4,6 +4,17 @@ "com.amazonaws.partnercentralselling#AWSPartnerCentralSelling": { "type": "service", "version": "2022-07-26", + "operations": [ + { + "target": "com.amazonaws.partnercentralselling#ListTagsForResource" + }, + { + "target": "com.amazonaws.partnercentralselling#TagResource" + }, + { + "target": "com.amazonaws.partnercentralselling#UntagResource" + } + ], "resources": [ { "target": "com.amazonaws.partnercentralselling#Engagement" @@ -418,6 +429,9 @@ { "target": "com.amazonaws.partnercentralselling#AccessDeniedException" }, + { + "target": "com.amazonaws.partnercentralselling#ConflictException" + }, { "target": "com.amazonaws.partnercentralselling#InternalServerException" }, @@ -438,7 +452,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to accept Engagement Invitations on AWS Partner Central" }, - "smithy.api#documentation": "

\n Use the AcceptEngagementInvitation action to accept an engagement invitation shared by AWS. \n Accepting the invitation indicates your willingness to participate in the engagement, \n granting you access to all engagement-related data.\n

", + "smithy.api#documentation": "

Use the AcceptEngagementInvitation action to accept an engagement\n invitation shared by AWS. Accepting the invitation indicates your willingness to\n participate in the engagement, granting you access to all engagement-related\n data.

", "smithy.api#http": { "method": "POST", "uri": "/AcceptEngagementInvitation", @@ -452,14 +466,14 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n The CatalogType parameter specifies the catalog associated with the engagement invitation. \n Accepted values are AWS and Sandbox, \n which determine the environment in which the engagement invitation is managed.\n

", + "smithy.api#documentation": "

The CatalogType parameter specifies the catalog associated with the\n engagement invitation. Accepted values are AWS and Sandbox,\n which determine the environment in which the engagement invitation is managed.

", "smithy.api#required": {} } }, "Identifier": { "target": "com.amazonaws.partnercentralselling#EngagementInvitationArnOrIdentifier", "traits": { - "smithy.api#documentation": "

\n The Identifier parameter in the AcceptEngagementInvitationRequest specifies the unique \n identifier of the EngagementInvitation to be accepted. \n Providing the correct identifier ensures that the intended invitation is accepted.\n

", + "smithy.api#documentation": "

The Identifier parameter in the\n AcceptEngagementInvitationRequest specifies the unique identifier of\n the EngagementInvitation to be accepted. Providing the correct identifier\n ensures that the intended invitation is accepted.

", "smithy.api#required": {} } } @@ -3540,7 +3554,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to creating engagements in AWS Partner Central" }, - "smithy.api#documentation": "

\n The CreateEngagement action allows you to create an Engagement, \n which serves as a collaborative space between different parties such as AWS Partners and AWS Sellers. \n This action automatically adds the caller's AWS account as an active member of the newly created Engagement.\n

", + "smithy.api#documentation": "

The CreateEngagement action allows you to create an\n Engagement, which serves as a collaborative space between different\n parties such as AWS Partners and AWS Sellers. This action automatically adds the\n caller's AWS account as an active member of the newly created\n Engagement.

", "smithy.api#http": { "method": "POST", "uri": "/CreateEngagement", @@ -3584,7 +3598,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to creating engagement invitations in AWS Partner Central" }, - "smithy.api#documentation": "

\nThis action creates an invitation from a sender to a single receiver to join an engagement.\n

", + "smithy.api#documentation": "

This action creates an invitation from a sender to a single receiver to join an\n engagement.

", "smithy.api#http": { "method": "POST", "uri": "/CreateEngagementInvitation", @@ -3599,14 +3613,14 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog related to the engagement. \n Accepted values are AWS and Sandbox, \n which determine the environment in which the engagement is managed.\n

", + "smithy.api#documentation": "

Specifies the catalog related to the engagement. Accepted values are AWS\n and Sandbox, which determine the environment in which the engagement is\n managed.

", "smithy.api#required": {} } }, "ClientToken": { "target": "com.amazonaws.partnercentralselling#ClientToken", "traits": { - "smithy.api#documentation": "

\n Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. \n This token helps prevent duplicate invitation creations.\n

", + "smithy.api#documentation": "

Specifies a unique, client-generated UUID to ensure that the request is handled\n exactly once. This token helps prevent duplicate invitation creations.

", "smithy.api#idempotencyToken": {}, "smithy.api#required": {} } @@ -3614,14 +3628,14 @@ "EngagementIdentifier": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier of the Engagement associated with the invitation. \n This parameter ensures the invitation is created within the correct Engagement context.\n

", + "smithy.api#documentation": "

The unique identifier of the Engagement associated with the invitation.\n This parameter ensures the invitation is created within the correct\n Engagement context.

", "smithy.api#required": {} } }, "Invitation": { "target": "com.amazonaws.partnercentralselling#Invitation", "traits": { - "smithy.api#documentation": "

\nThe Invitation object all information necessary to initiate an engagement invitation to a partner. \nIt contains a personalized message from the sender, the invitation's receiver, and a payload. The Payload can \nbe the OpportunityInvitation, which includes detailed structures for sender contacts, partner responsibilities, customer \ninformation, and project details.

", + "smithy.api#documentation": "

The Invitation object contains all information necessary to initiate an\n engagement invitation to a partner: a personalized message from the sender,\n the invitation's receiver, and a payload. The Payload can be the\n OpportunityInvitation, which includes detailed structures for sender\n contacts, partner responsibilities, customer information, and project details.

", "smithy.api#required": {} } } @@ -3636,14 +3650,14 @@ "Id": { "target": "com.amazonaws.partnercentralselling#EngagementInvitationIdentifier", "traits": { - "smithy.api#documentation": "

\n Unique identifier assigned to the newly created engagement invitation.\n

", + "smithy.api#documentation": "

Unique identifier assigned to the newly created engagement invitation.

", "smithy.api#required": {} } }, "Arn": { "target": "com.amazonaws.partnercentralselling#EngagementInvitationArn", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that uniquely identifies the engagement\n invitation.\n

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies the engagement invitation.\n

", "smithy.api#required": {} } } @@ -3658,14 +3672,14 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n The CreateEngagementRequest$Catalog parameter specifies the catalog related to the engagement. \n Accepted values are AWS and Sandbox, \n which determine the environment in which the engagement is managed.\n

", + "smithy.api#documentation": "

The CreateEngagementRequest$Catalog parameter specifies the catalog\n related to the engagement. Accepted values are AWS and\n Sandbox, which determine the environment in which the engagement is\n managed.

", "smithy.api#required": {} } }, "ClientToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The CreateEngagementRequest$ClientToken parameter specifies a unique, case-sensitive identifier to ensure that the request is handled exactly once. \n The value must not exceed sixty-four alphanumeric characters.\n

", + "smithy.api#documentation": "

The CreateEngagementRequest$ClientToken parameter specifies a unique,\n case-sensitive identifier to ensure that the request is handled exactly once. The value\n must not exceed sixty-four alphanumeric characters.

", "smithy.api#idempotencyToken": {}, "smithy.api#pattern": "^[!-~]{1,64}$", "smithy.api#required": {} @@ -3674,21 +3688,21 @@ "Title": { "target": "com.amazonaws.partnercentralselling#EngagementTitle", "traits": { - "smithy.api#documentation": "

\nSpecifies the title of the Engagement.\n

", + "smithy.api#documentation": "

Specifies the title of the Engagement.

", "smithy.api#required": {} } }, "Description": { "target": "com.amazonaws.partnercentralselling#EngagementDescription", "traits": { - "smithy.api#documentation": "

\nProvides a description of the Engagement.\n

", + "smithy.api#documentation": "

Provides a description of the Engagement.

", "smithy.api#required": {} } }, "Contexts": { "target": "com.amazonaws.partnercentralselling#EngagementContexts", "traits": { - "smithy.api#documentation": "

\n The Contexts field is a required array of objects, with a maximum of 5 contexts allowed, \n specifying detailed information about customer projects associated with the Engagement. \n Each context object contains a Type field indicating the context type, \n which must be CustomerProject in this version, and a Payload field containing the CustomerProject details. The CustomerProject object is composed of two main components: Customer and Project. The Customer object includes information such as CompanyName, WebsiteUrl, Industry, and CountryCode, providing essential details about the customer. The Project object contains Title, BusinessProblem, and TargetCompletionDate, offering insights into the specific project associated with the customer. This structure allows comprehensive context to be included within the Engagement, \n facilitating effective collaboration between parties by providing relevant customer and project information.\n

" + "smithy.api#documentation": "

The Contexts field is a required array of objects, with a maximum of 5\n contexts allowed, specifying detailed information about customer projects associated\n with the Engagement. Each context object contains a Type field indicating\n the context type, which must be CustomerProject in this version, and a\n Payload field containing the CustomerProject details. The\n CustomerProject object is composed of two main components:\n Customer and Project. The Customer object\n includes information such as CompanyName, WebsiteUrl,\n Industry, and CountryCode, providing essential details\n about the customer. The Project object contains Title,\n BusinessProblem, and TargetCompletionDate, offering\n insights into the specific project associated with the customer. This structure allows\n comprehensive context to be included within the Engagement, facilitating effective\n collaboration between parties by providing relevant customer and project\n information.

" } } }, @@ -3702,13 +3716,13 @@ "Id": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\nUnique identifier assigned to the newly created engagement.\n

" + "smithy.api#documentation": "

Unique identifier assigned to the newly created engagement.

" } }, "Arn": { "target": "com.amazonaws.partnercentralselling#EngagementArn", "traits": { - "smithy.api#documentation": "

\nThe Amazon Resource Name (ARN) that identifies the engagement.\n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that identifies the engagement.

" } } }, @@ -3751,7 +3765,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to create new Opportunities on AWS Partner Central" }, - "smithy.api#documentation": "

Creates an Opportunity record in Partner Central. Use this operation to\n create a potential business opportunity for submission to Amazon Web Services. Creating\n an opportunity sets Lifecycle.ReviewStatus to Pending\n Submission.

\n

To submit an opportunity, follow these steps:

\n
    \n
  1. \n

    To create the opportunity, use CreateOpportunity.

    \n
  2. \n
  3. \n

    To associate a solution with the opportunity, use\n AssociateOpportunity.

    \n
  4. \n
  5. \n

    To submit the opportunity, use\n StartEngagementFromOpportunityTask.

    \n
  6. \n
\n

After submission, you can't edit the opportunity until the review is complete. But\n opportunities in the Pending Submission state must have complete details.\n You can update the opportunity while it's in the Pending Submission\n state.

\n

There's a set of mandatory fields to create opportunities, but consider providing\n optional fields to enrich the opportunity record.

", + "smithy.api#documentation": "

Creates an Opportunity record in Partner Central. Use this operation to\n create a potential business opportunity for submission to Amazon Web Services. Creating\n an opportunity sets Lifecycle.ReviewStatus to Pending\n Submission.

\n

To submit an opportunity, follow these steps:

\n
    \n
  1. \n

    To create the opportunity, use CreateOpportunity.

    \n
  2. \n
  3. \n

    To associate a solution with the opportunity, use\n AssociateOpportunity.

    \n
  4. \n
  5. \n

    To start the engagement with AWS, use\n StartEngagementFromOpportunity.

    \n
  6. \n
\n

After submission, you can't edit the opportunity until the review is complete. But\n opportunities in the Pending Submission state must have complete details.\n You can update the opportunity while it's in the Pending Submission\n state.

\n

There's a set of mandatory fields to create opportunities, but consider providing\n optional fields to enrich the opportunity record.

", "smithy.api#http": { "method": "POST", "uri": "/CreateOpportunity", @@ -3773,7 +3787,7 @@ "PrimaryNeedsFromAws": { "target": "com.amazonaws.partnercentralselling#PrimaryNeedsFromAws", "traits": { - "smithy.api#documentation": "

Identifies the type of support the partner needs from Amazon Web Services.

\n

Valid values:

\n
    \n
  • \n

    Cosell—Architectural Validation: Confirmation from Amazon Web Services that the\n partner's proposed solution architecture is aligned with Amazon Web Services best\n practices and poses minimal architectural risks.

    \n
  • \n
  • \n

    Cosell—Business Presentation: Request Amazon Web Services seller's\n participation in a joint customer presentation.

    \n
  • \n
  • \n

    Cosell—Competitive Information: Access to Amazon Web Services competitive\n resources and support for the partner's proposed solution.

    \n
  • \n
  • \n

    Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for\n support situations where a partner may be receiving an upfront discount on a\n service (for example: EDP deals).

    \n
  • \n
  • \n

    Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions\n Architect to address the partner's questions about the proposed solution.

    \n
  • \n
  • \n

    Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different\n cost savings of proposed solutions on Amazon Web Services versus on-premises or a\n traditional hosting environment.

    \n
  • \n
  • \n

    Cosell—Deal Support: Request Amazon Web Services seller's support to progress\n the opportunity (for example: joint customer call, strategic\n positioning).

    \n
  • \n
  • \n

    Cosell—Support for Public Tender/RFx: Opportunity related to the public sector\n where the partner needs Amazon Web Services RFx support.

    \n
  • \n
  • \n

    Do Not Need Support from AWS Sales Rep: Indicates that a partner doesn't need\n support from an Amazon Web Services sales representative, and the partner solely\n manages the opportunity. It's possible to request coselling support on these\n opportunities at any stage during their lifecycles. This is also known as a\n for-visibility-only (FVO) opportunity.

    \n
  • \n
" + "smithy.api#documentation": "

Identifies the type of support the partner needs from Amazon Web Services.

\n

Valid values:

\n
    \n
  • \n

    Cosell—Architectural Validation: Confirmation from Amazon Web Services that the\n partner's proposed solution architecture is aligned with Amazon Web Services best\n practices and poses minimal architectural risks.

    \n
  • \n
  • \n

    Cosell—Business Presentation: Request Amazon Web Services seller's\n participation in a joint customer presentation.

    \n
  • \n
  • \n

    Cosell—Competitive Information: Access to Amazon Web Services competitive\n resources and support for the partner's proposed solution.

    \n
  • \n
  • \n

    Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for\n support situations where a partner may be receiving an upfront discount on a\n service (for example: EDP deals).

    \n
  • \n
  • \n

    Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions\n Architect to address the partner's questions about the proposed solution.

    \n
  • \n
  • \n

    Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different\n cost savings of proposed solutions on Amazon Web Services versus on-premises or a\n traditional hosting environment.

    \n
  • \n
  • \n

    Cosell—Deal Support: Request Amazon Web Services seller's support to progress\n the opportunity (for example: joint customer call, strategic\n positioning).

    \n
  • \n
  • \n

    Cosell—Support for Public Tender/RFx: Opportunity related to the public sector\n where the partner needs Amazon Web Services RFx support.

    \n
  • \n
" } }, "NationalSecurity": { @@ -3917,7 +3931,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to creating resource snapshots in AWS Partner Central" }, - "smithy.api#documentation": "

\n This action allows you to create an immutable snapshot of a specific resource, such as an opportunity, \n within the context of an engagement. \n The snapshot captures a subset of the resource's data based on the schema defined by the provided template.

", + "smithy.api#documentation": "

This action allows you to create an immutable snapshot of a specific resource, such\n as an opportunity, within the context of an engagement. The snapshot captures a subset\n of the resource's data based on the schema defined by the provided template.

", "smithy.api#http": { "method": "POST", "uri": "/CreateResourceSnapshot", @@ -3958,7 +3972,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to creating resource snapshot jobs in AWS Partner Central" }, - "smithy.api#documentation": "

\n Use this action to create a job to generate a snapshot of the specified resource\n within an engagement. It initiates an asynchronous process to create a resource\n snapshot. The job creates a new snapshot only if the resource state has changed,\n adhering to the same access control and immutability rules as direct snapshot creation.\n

", + "smithy.api#documentation": "

Use this action to create a job to generate a snapshot of the specified resource\n within an engagement. It initiates an asynchronous process to create a resource\n snapshot. The job creates a new snapshot only if the resource state has changed,\n adhering to the same access control and immutability rules as direct snapshot\n creation.

", "smithy.api#http": { "method": "POST", "uri": "/CreateResourceSnapshotJob", @@ -3973,14 +3987,14 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog in which to create the snapshot job. Valid values are\n AWS and Sandbox.\n

", + "smithy.api#documentation": "

Specifies the catalog in which to create the snapshot job. Valid values are\n AWS and Sandbox.

", "smithy.api#required": {} } }, "ClientToken": { "target": "com.amazonaws.partnercentralselling#ClientToken", "traits": { - "smithy.api#documentation": "

\n Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. \n This token helps prevent duplicate snapshot job creations.\n

", + "smithy.api#documentation": "

A client-generated UUID used for idempotency check. The token helps prevent duplicate\n job creations.

", "smithy.api#idempotencyToken": {}, "smithy.api#pattern": "^[!-~]{1,64}$", "smithy.api#required": {} @@ -3989,30 +4003,36 @@ "EngagementIdentifier": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the identifier of the engagement associated with the resource to be\n snapshotted.\n

", + "smithy.api#documentation": "

Specifies the identifier of the engagement associated with the resource to be\n snapshotted.

", "smithy.api#required": {} } }, "ResourceType": { "target": "com.amazonaws.partnercentralselling#ResourceType", "traits": { - "smithy.api#documentation": "

\n The type of resource for which the snapshot job is being created. Must be one of the\n supported resource types Opportunity.\n

", + "smithy.api#documentation": "

The type of resource for which the snapshot job is being created. Must be one of the\n supported resource types i.e. Opportunity\n

", "smithy.api#required": {} } }, "ResourceIdentifier": { "target": "com.amazonaws.partnercentralselling#ResourceIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the identifier of the specific resource to be snapshotted. The format\n depends on the ResourceType.\n

", + "smithy.api#documentation": "

Specifies the identifier of the specific resource to be snapshotted. The format\n depends on the ResourceType.

", "smithy.api#required": {} } }, "ResourceSnapshotTemplateIdentifier": { "target": "com.amazonaws.partnercentralselling#ResourceTemplateName", "traits": { - "smithy.api#documentation": "

\n Specifies the name of the template that defines the schema for the snapshot.\n

", + "smithy.api#documentation": "

Specifies the name of the template that defines the schema for the snapshot.

", "smithy.api#required": {} } + }, + "Tags": { + "target": "com.amazonaws.partnercentralselling#TagList", + "traits": { + "smithy.api#documentation": "A list of objects specifying each tag name and value." + } } }, "traits": { @@ -4025,13 +4045,13 @@ "Id": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier for the created snapshot job.\n

" + "smithy.api#documentation": "

The unique identifier for the created snapshot job.

" } }, "Arn": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobArn", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the created snapshot job.\n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the created snapshot job.

" } } }, @@ -4045,42 +4065,42 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog where the snapshot is created. Valid values are\n AWS and Sandbox.\n

", + "smithy.api#documentation": "

Specifies the catalog where the snapshot is created. Valid values are\n AWS and Sandbox.

", "smithy.api#required": {} } }, "EngagementIdentifier": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier of the engagement associated with this snapshot. This field\n links the snapshot to a specific engagement context.\n

", + "smithy.api#documentation": "

The unique identifier of the engagement associated with this snapshot. This field\n links the snapshot to a specific engagement context.

", "smithy.api#required": {} } }, "ResourceType": { "target": "com.amazonaws.partnercentralselling#ResourceType", "traits": { - "smithy.api#documentation": "

\n Specifies the type of resource for which the snapshot is being created. This field\n determines the structure and content of the snapshot. Must be one of the supported\n resource types, such as: Opportunity.\n

", + "smithy.api#documentation": "

Specifies the type of resource for which the snapshot is being created. This field\n determines the structure and content of the snapshot. Must be one of the supported\n resource types, such as: Opportunity.

", "smithy.api#required": {} } }, "ResourceIdentifier": { "target": "com.amazonaws.partnercentralselling#ResourceIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier of the specific resource to be snapshotted. The format and\n constraints of this identifier depend on the ResourceType specified. For example: For\n Opportunity type, it will be an opportunity ID.\n

", + "smithy.api#documentation": "

The unique identifier of the specific resource to be snapshotted. The format and\n constraints of this identifier depend on the ResourceType specified. For\n example: For Opportunity type, it will be an opportunity ID.

", "smithy.api#required": {} } }, "ResourceSnapshotTemplateIdentifier": { "target": "com.amazonaws.partnercentralselling#ResourceTemplateName", "traits": { - "smithy.api#documentation": "

\n The name of the template that defines the schema for the snapshot. This template\n determines which subset of the resource data will be included in the snapshot. Must\n correspond to an existing and valid template for the specified ResourceType.\n

", + "smithy.api#documentation": "

The name of the template that defines the schema for the snapshot. This template\n determines which subset of the resource data will be included in the snapshot. Must\n correspond to an existing and valid template for the specified\n ResourceType.

", "smithy.api#required": {} } }, "ClientToken": { "target": "com.amazonaws.partnercentralselling#ClientToken", "traits": { - "smithy.api#documentation": "

\n Specifies a unique, client-generated UUID to ensure that the request is handled exactly once. \n This token helps prevent duplicate snapshot creations.\n

", + "smithy.api#documentation": "

Specifies a unique, client-generated UUID to ensure that the request is handled\n exactly once. This token helps prevent duplicate snapshot creations.

", "smithy.api#idempotencyToken": {}, "smithy.api#pattern": "^[!-~]{1,64}$", "smithy.api#required": {} @@ -4097,13 +4117,13 @@ "Arn": { "target": "com.amazonaws.partnercentralselling#ResourceArn", "traits": { - "smithy.api#documentation": "

\n Specifies the Amazon Resource Name (ARN) that uniquely identifies the snapshot\n created.\n

" + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) that uniquely identifies the snapshot\n created.

" } }, "Revision": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotRevision", "traits": { - "smithy.api#documentation": "

\n Specifies the revision number of the created snapshot. This field provides important\n information about the snapshot's place in the sequence of snapshots for the given\n resource.\n

" + "smithy.api#documentation": "

Specifies the revision number of the created snapshot. This field provides important\n information about the snapshot's place in the sequence of snapshots for the given\n resource.

" } } }, @@ -5162,12 +5182,12 @@ "Project": { "target": "com.amazonaws.partnercentralselling#EngagementCustomerProjectDetails", "traits": { - "smithy.api#documentation": "

\n Information about the customer project associated with the Engagement.\n

" + "smithy.api#documentation": "

Information about the customer project associated with the Engagement.

" } } }, "traits": { - "smithy.api#documentation": "

\n The CustomerProjects structure in Engagements offers a flexible framework for managing\n customer-project relationships. It supports multiple customers per Engagement and\n multiple projects per customer, while also allowing for customers without projects and\n projects without specific customers.\n

\n

All Engagement members have full visibility of customers and their associated\n projects, enabling the capture of relevant context even when project details are not\n fully defined. This structure also facilitates targeted invitations, allowing partners\n to focus on specific customers and their business problems when sending Engagement\n invitations.

" + "smithy.api#documentation": "

The CustomerProjects structure in Engagements offers a flexible framework for managing\n customer-project relationships. It supports multiple customers per Engagement and\n multiple projects per customer, while also allowing for customers without projects and\n projects without specific customers.

\n

All Engagement members have full visibility of customers and their associated\n projects, enabling the capture of relevant context even when project details are not\n fully defined. This structure also facilitates targeted invitations, allowing partners\n to focus on specific customers and their business problems when sending Engagement\n invitations.

" } }, "com.amazonaws.partnercentralselling#CustomerSummary": { @@ -5208,6 +5228,9 @@ { "target": "com.amazonaws.partnercentralselling#AccessDeniedException" }, + { + "target": "com.amazonaws.partnercentralselling#ConflictException" + }, { "target": "com.amazonaws.partnercentralselling#ResourceNotFoundException" }, @@ -5222,7 +5245,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to deleting resource snapshot jobs on AWS Partner Central" }, - "smithy.api#documentation": "

\n Use this action to deletes a previously created resource snapshot job. The job must be\n in a stopped state before it can be deleted.\n

", + "smithy.api#documentation": "

Use this action to deletes a previously created resource snapshot job. The job must\n be in a stopped state before it can be deleted.

", "smithy.api#http": { "method": "POST", "uri": "/DeleteResourceSnapshotJob", @@ -5237,14 +5260,14 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog from which to delete the snapshot job. Valid values are\n AWS and Sandbox. \n

", + "smithy.api#documentation": "

Specifies the catalog from which to delete the snapshot job. Valid values are\n AWS and Sandbox.

", "smithy.api#required": {} } }, "ResourceSnapshotJobIdentifier": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier of the resource snapshot job to be deleted.\n

", + "smithy.api#documentation": "

The unique identifier of the resource snapshot job to be deleted.

", "smithy.api#required": {}, "smithy.api#resourceIdentifier": "Identifier" } @@ -5483,19 +5506,19 @@ "Type": { "target": "com.amazonaws.partnercentralselling#EngagementContextType", "traits": { - "smithy.api#documentation": "

\n Specifies the type of Engagement context. Valid values are \"CustomerProject\" or\n \"Document\", indicating whether the context relates to a customer project or a document\n respectively. \n

", + "smithy.api#documentation": "

Specifies the type of Engagement context. Valid values are \"CustomerProject\" or\n \"Document\", indicating whether the context relates to a customer project or a document\n respectively.

", "smithy.api#required": {} } }, "Payload": { "target": "com.amazonaws.partnercentralselling#EngagementContextPayload", "traits": { - "smithy.api#documentation": "

\n Contains the specific details of the Engagement context. The structure of this payload\n varies depending on the Type field. \n

" + "smithy.api#documentation": "

Contains the specific details of the Engagement context. The structure of this payload\n varies depending on the Type field.

" } } }, "traits": { - "smithy.api#documentation": "

\n Provides detailed context information for an Engagement. This structure allows for\n specifying the type of context and its associated payload. \n

" + "smithy.api#documentation": "

Provides detailed context information for an Engagement. This structure allows for\n specifying the type of context and its associated payload.

" } }, "com.amazonaws.partnercentralselling#EngagementContextPayload": { @@ -5504,12 +5527,12 @@ "CustomerProject": { "target": "com.amazonaws.partnercentralselling#CustomerProjectsContext", "traits": { - "smithy.api#documentation": "

\n Contains detailed information about a customer project when the context type is\n \"CustomerProject\". This field is present only when the Type in EngagementContextDetails\n is set to \"CustomerProject\".\n

" + "smithy.api#documentation": "

Contains detailed information about a customer project when the context type is\n \"CustomerProject\". This field is present only when the Type in EngagementContextDetails\n is set to \"CustomerProject\".

" } } }, "traits": { - "smithy.api#documentation": "

\n Represents the payload of an Engagement context. The structure of this payload varies\n based on the context type specified in the EngagementContextDetails.\n

" + "smithy.api#documentation": "

Represents the payload of an Engagement context. The structure of this payload varies\n based on the context type specified in the EngagementContextDetails.

" } }, "com.amazonaws.partnercentralselling#EngagementContextType": { @@ -5586,28 +5609,28 @@ "Title": { "target": "com.amazonaws.partnercentralselling#EngagementCustomerProjectTitle", "traits": { - "smithy.api#documentation": "

\n The title of the project.\n

", + "smithy.api#documentation": "

The title of the project.

", "smithy.api#required": {} } }, "BusinessProblem": { "target": "com.amazonaws.partnercentralselling#EngagementCustomerBusinessProblem", "traits": { - "smithy.api#documentation": "

\n A description of the business problem the project aims to solve.\n

", + "smithy.api#documentation": "

A description of the business problem the project aims to solve.

", "smithy.api#required": {} } }, "TargetCompletionDate": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The target completion date for the customer's project.\n

", + "smithy.api#documentation": "

The target completion date for the customer's project.

", "smithy.api#pattern": "^[1-9][0-9]{3}-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

\n Provides comprehensive details about a customer project associated with an Engagement.\n This may include information such as project goals, timelines, and specific customer\n requirements.\n

" + "smithy.api#documentation": "

Provides comprehensive details about a customer project associated with an Engagement.\n This may include information such as project goals, timelines, and specific customer\n requirements.

" } }, "com.amazonaws.partnercentralselling#EngagementCustomerProjectTitle": { @@ -5793,7 +5816,7 @@ "EngagementId": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the Engagement associated with this invitation. This links the\n invitation to its parent Engagement. \n

" + "smithy.api#documentation": "

The identifier of the Engagement associated with this invitation. This links the\n invitation to its parent Engagement.

" } }, "EngagementTitle": { @@ -5871,24 +5894,24 @@ "CompanyName": { "target": "com.amazonaws.partnercentralselling#MemberCompanyName", "traits": { - "smithy.api#documentation": "

\n The official name of the member's company or organization.\n

" + "smithy.api#documentation": "

The official name of the member's company or organization.

" } }, "WebsiteUrl": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The URL of the member company's website. This offers a way to find more information\n about the member organization and serves as an additional identifier.\n

" + "smithy.api#documentation": "

The URL of the member company's website. This offers a way to find more information\n about the member organization and serves as an additional identifier.

" } }, "AccountId": { "target": "com.amazonaws.partnercentralselling#AwsAccount", "traits": { - "smithy.api#documentation": "

\n This is the unique identifier for the AWS account associated with the member\n organization. It's used for AWS-related operations and identity verification. \n

" + "smithy.api#documentation": "

This is the unique identifier for the AWS account associated with the member\n organization. It's used for AWS-related operations and identity verification.

" } } }, "traits": { - "smithy.api#documentation": "

Engagement members are the participants in an Engagement, which is likely a\n collaborative project or business opportunity within the AWS partner network. Members\n can be different partner organizations or AWS accounts that are working together on a\n specific engagement.

\n

Each member is represented by their AWS Account ID, Company Name, and associated\n details. Members have a status within the Engagement (PENDING, ACCEPTED, REJECTED, or\n WITHDRAWN), indicating their current state of participation. Only existing members of an\n Engagement can view the list of other members. This implies a level of privacy and\n access control within the Engagement structure.

" + "smithy.api#documentation": "

Engagement members are the participants in an Engagement, which is likely a\n collaborative project or business opportunity within the AWS partner network. Members\n can be different partner organizations or AWS accounts that are working together on a\n specific engagement.

\n

Each member is represented by their AWS Account ID, Company Name, and associated\n details. Members have a status within the Engagement (PENDING, ACCEPTED, REJECTED, or\n WITHDRAWN), indicating their current state of participation. Only existing members of an\n Engagement can view the list of other members. This implies a level of privacy and\n access control within the Engagement structure.

" } }, "com.amazonaws.partnercentralselling#EngagementMemberSummaries": { @@ -5903,18 +5926,18 @@ "CompanyName": { "target": "com.amazonaws.partnercentralselling#MemberCompanyName", "traits": { - "smithy.api#documentation": "

\n The official name of the member's company or organization.\n

" + "smithy.api#documentation": "

The official name of the member's company or organization.

" } }, "WebsiteUrl": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The URL of the member company's website. This offers a way to find more information\n about the member organization and serves as an additional identifier.\n

" + "smithy.api#documentation": "

The URL of the member company's website. This offers a way to find more information\n about the member organization and serves as an additional identifier.

" } } }, "traits": { - "smithy.api#documentation": "

\n The EngagementMemberSummary provides a snapshot of essential information about\n participants in an AWS Partner Central Engagement. This compact data structure\n encapsulates key details of each member, facilitating efficient collaboration and\n management within the Engagement.\n

" + "smithy.api#documentation": "

The EngagementMemberSummary provides a snapshot of essential information about\n participants in an AWS Partner Central Engagement. This compact data structure\n encapsulates key details of each member, facilitating efficient collaboration and\n management within the Engagement.

" } }, "com.amazonaws.partnercentralselling#EngagementMembers": { @@ -5944,37 +5967,37 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Indicates the environment in which the resource and engagement exist.\n

", + "smithy.api#documentation": "

Indicates the environment in which the resource and engagement exist.

", "smithy.api#required": {} } }, "EngagementId": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n A unique identifier for the engagement associated with the resource.\n

" + "smithy.api#documentation": "

A unique identifier for the engagement associated with the resource.

" } }, "ResourceType": { "target": "com.amazonaws.partnercentralselling#ResourceType", "traits": { - "smithy.api#documentation": "

\n Categorizes the type of resource associated with the engagement.\n

" + "smithy.api#documentation": "

Categorizes the type of resource associated with the engagement.

" } }, "ResourceId": { "target": "com.amazonaws.partnercentralselling#ResourceIdentifier", "traits": { - "smithy.api#documentation": "

\n A unique identifier for the specific resource. Varies depending on the resource\n type.\n

" + "smithy.api#documentation": "

A unique identifier for the specific resource. Varies depending on the resource type.\n

" } }, "CreatedBy": { "target": "com.amazonaws.partnercentralselling#AwsAccount", "traits": { - "smithy.api#documentation": "

\n The AWS account ID of the entity that created the association.\n

" + "smithy.api#documentation": "

The AWS account ID of the entity that owns the resource. Identifies the account\n responsible for or having primary control over the resource.

" } } }, "traits": { - "smithy.api#documentation": "

\n This provide a streamlined view of the relationships between engagements and\n resources. These summaries offer a crucial link between collaborative engagements and\n the specific resources involved, such as opportunities.These summaries are particularly\n valuable for partners navigating complex engagements with multiple resources. They\n enable quick insights into resource distribution across engagements, support efficient\n resource management, and help maintain a clear overview of collaborative activities.\n

" + "smithy.api#documentation": "

This provide a streamlined view of the relationships between engagements and\n resources. These summaries offer a crucial link between collaborative engagements and\n the specific resources involved, such as opportunities.These summaries are particularly\n valuable for partners navigating complex engagements with multiple resources. They\n enable quick insights into resource distribution across engagements, support efficient\n resource management, and help maintain a clear overview of collaborative activities.\n

" } }, "com.amazonaws.partnercentralselling#EngagementResourceAssociationSummaryList": { @@ -6012,20 +6035,20 @@ "SortOrder": { "target": "com.amazonaws.partnercentralselling#SortOrder", "traits": { - "smithy.api#documentation": "

\n The order in which to sort the results.\n

", + "smithy.api#documentation": "

The order in which to sort the results.

", "smithy.api#required": {} } }, "SortBy": { "target": "com.amazonaws.partnercentralselling#EngagementSortName", "traits": { - "smithy.api#documentation": "

\n The field by which to sort the results.\n

", + "smithy.api#documentation": "

The field by which to sort the results.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

\n Specifies the sorting parameters for listing Engagements.\n

" + "smithy.api#documentation": "

Specifies the sorting parameters for listing Engagements.

" } }, "com.amazonaws.partnercentralselling#EngagementSortName": { @@ -6045,42 +6068,42 @@ "Arn": { "target": "com.amazonaws.partnercentralselling#EngagementArn", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the created engagement.\n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the created Engagement.

" } }, "Id": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier for the engagement.\n

" + "smithy.api#documentation": "

The unique identifier for the Engagement.

" } }, "Title": { "target": "com.amazonaws.partnercentralselling#EngagementTitle", "traits": { - "smithy.api#documentation": "

\n The title of the engagement.\n

" + "smithy.api#documentation": "

The title of the Engagement.

" } }, "CreatedAt": { "target": "com.amazonaws.partnercentralselling#DateTime", "traits": { - "smithy.api#documentation": "

\n The date and time when the engagement was created.\n

" + "smithy.api#documentation": "

The date and time when the Engagement was created.

" } }, "CreatedBy": { "target": "com.amazonaws.partnercentralselling#AwsAccount", "traits": { - "smithy.api#documentation": "

\n The AWS account ID of the engagement creator.\n

" + "smithy.api#documentation": "

The AWS Account ID of the Engagement creator.

" } }, "MemberCount": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

\n The number of members in the engagement.\n

" + "smithy.api#documentation": "

The number of members in the Engagement.

" } } }, "traits": { - "smithy.api#documentation": "

\n An object that contains an Engagement's subset of fields.\n

" + "smithy.api#documentation": "

An object that contains an Engagement's subset of fields.

" } }, "com.amazonaws.partnercentralselling#EngagementSummaryList": { @@ -6137,7 +6160,7 @@ "EstimationUrl": { "target": "com.amazonaws.partnercentralselling#WebsiteUrl", "traits": { - "smithy.api#documentation": "

\n A URL providing additional information or context about the spend estimation.\n

" + "smithy.api#documentation": "

A URL providing additional information or context about the spend estimation.

" } } }, @@ -6354,7 +6377,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to retrieval of engagement details in AWS Partner Central" }, - "smithy.api#documentation": "

\n Use this action to retrieve the engagement record for a given\n EngagementIdentifier.\n

", + "smithy.api#documentation": "

Use this action to retrieve the engagement record for a given\n EngagementIdentifier.

", "smithy.api#http": { "method": "POST", "uri": "/GetEngagement", @@ -6451,7 +6474,7 @@ "EngagementId": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the engagement associated with this invitation.This ID links the\n invitation to its corresponding engagement.\n

" + "smithy.api#documentation": "

The identifier of the engagement associated with this invitation.This ID links the\n invitation to its corresponding engagement.

" } }, "EngagementTitle": { @@ -6521,19 +6544,19 @@ "InvitationMessage": { "target": "com.amazonaws.partnercentralselling#InvitationMessage", "traits": { - "smithy.api#documentation": "

\n The message sent to the invited partner when the invitation was created.\n

" + "smithy.api#documentation": "

The message sent to the invited partner when the invitation was created.

" } }, "EngagementDescription": { "target": "com.amazonaws.partnercentralselling#EngagementDescription", "traits": { - "smithy.api#documentation": "

\n The description of the engagement associated with this invitation.\n

" + "smithy.api#documentation": "

The description of the engagement associated with this invitation.

" } }, "ExistingMembers": { "target": "com.amazonaws.partnercentralselling#EngagementMemberSummaries", "traits": { - "smithy.api#documentation": "

\n A list of active members currently part of the Engagement. This array contains a\n maximum of 10 members, each represented by an object with the following\n properties.\n

\n
    \n
  • \n

    \n CompanyName: The name of the member's company. \n

    \n
  • \n
  • \n

    \n WebsiteUrl: The website URL of the member's company. \n

    \n
  • \n
" + "smithy.api#documentation": "

A list of active members currently part of the Engagement. This array contains a\n maximum of 10 members, each represented by an object with the following\n properties.

\n
    \n
  • \n

    CompanyName: The name of the member's company.

    \n
  • \n
  • \n

    WebsiteUrl: The website URL of the member's company.

    \n
  • \n
" } } }, @@ -6547,14 +6570,14 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog related to the engagement request. Valid values are\n AWS and Sandbox.\n

", + "smithy.api#documentation": "

Specifies the catalog related to the engagement request. Valid values are\n AWS and Sandbox.

", "smithy.api#required": {} } }, "Identifier": { "target": "com.amazonaws.partnercentralselling#EngagementArnOrIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the identifier of the Engagement record to retrieve.\n

", + "smithy.api#documentation": "

Specifies the identifier of the Engagement record to retrieve.

", "smithy.api#required": {} } } @@ -6569,49 +6592,49 @@ "Id": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique resource identifier of the engagement retrieved.\n

" + "smithy.api#documentation": "

The unique resource identifier of the engagement retrieved.

" } }, "Arn": { "target": "com.amazonaws.partnercentralselling#EngagementArn", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the engagement retrieved.\n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the engagement retrieved.

" } }, "Title": { "target": "com.amazonaws.partnercentralselling#EngagementTitle", "traits": { - "smithy.api#documentation": "

\n The title of the engagement. It provides a brief, descriptive name for the engagement\n that is meaningful and easily recognizable.\n

" + "smithy.api#documentation": "

The title of the engagement. It provides a brief, descriptive name for the engagement\n that is meaningful and easily recognizable.

" } }, "Description": { "target": "com.amazonaws.partnercentralselling#EngagementDescription", "traits": { - "smithy.api#documentation": "

\n A more detailed description of the engagement. This provides additional context or\n information about the engagement's purpose or scope. \n

" + "smithy.api#documentation": "

A more detailed description of the engagement. This provides additional context or\n information about the engagement's purpose or scope.

" } }, "CreatedAt": { "target": "com.amazonaws.partnercentralselling#DateTime", "traits": { - "smithy.api#documentation": "

\n The date and time when the Engagement was created, presented in ISO 8601 format (UTC).\n For example: \"2023-05-01T20:37:46Z\". This timestamp helps track the lifecycle of the\n Engagement.\n

" + "smithy.api#documentation": "

The date and time when the Engagement was created, presented in ISO 8601 format (UTC).\n For example: \"2023-05-01T20:37:46Z\". This timestamp helps track the lifecycle of the\n Engagement.

" } }, "CreatedBy": { "target": "com.amazonaws.partnercentralselling#AwsAccount", "traits": { - "smithy.api#documentation": "

\n The AWS account ID of the user who originally created the engagement. This field helps\n in tracking the origin of the engagement.\n

" + "smithy.api#documentation": "

The AWS account ID of the user who originally created the engagement. This field helps\n in tracking the origin of the engagement.

" } }, "MemberCount": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

\n Specifies the current count of members participating in the Engagement. This count\n includes all active members regardless of their roles or permissions within the\n Engagement.\n

" + "smithy.api#documentation": "

Specifies the current count of members participating in the Engagement. This count\n includes all active members regardless of their roles or permissions within the\n Engagement.

" } }, "Contexts": { "target": "com.amazonaws.partnercentralselling#EngagementContexts", "traits": { - "smithy.api#documentation": "

\n A list of context objects associated with the engagement. Each context provides\n additional information related to the Engagement, such as customer projects or\n documents.\n

" + "smithy.api#documentation": "

A list of context objects associated with the engagement. Each context provides\n additional information related to the Engagement, such as customer projects or\n documents.

" } } }, @@ -6695,7 +6718,7 @@ "PrimaryNeedsFromAws": { "target": "com.amazonaws.partnercentralselling#PrimaryNeedsFromAws", "traits": { - "smithy.api#documentation": "

Identifies the type of support the partner needs from Amazon Web Services.

\n

Valid values:

\n
    \n
  • \n

    Cosell—Architectural Validation: Confirmation from Amazon Web Services that the\n partner's proposed solution architecture is aligned with Amazon Web Services best\n practices and poses minimal architectural risks.

    \n
  • \n
  • \n

    Cosell—Business Presentation: Request Amazon Web Services seller's\n participation in a joint customer presentation.

    \n
  • \n
  • \n

    Cosell—Competitive Information: Access to Amazon Web Services competitive\n resources and support for the partner's proposed solution.

    \n
  • \n
  • \n

    Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for\n support situations where a partner may be receiving an upfront discount on a\n service (for example: EDP deals).

    \n
  • \n
  • \n

    Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions\n Architect to address the partner's questions about the proposed solution.

    \n
  • \n
  • \n

    Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different\n cost savings of proposed solutions on Amazon Web Services versus on-premises or a\n traditional hosting environment.

    \n
  • \n
  • \n

    Cosell—Deal Support: Request Amazon Web Services seller's support to progress\n the opportunity (for example: joint customer call, strategic\n positioning).

    \n
  • \n
  • \n

    Cosell—Support for Public Tender/RFx: Opportunity related to the public sector\n where the partner needs Amazon Web Services RFx support.

    \n
  • \n
  • \n

    Do Not Need Support from Amazon Web Services Sales Rep: Indicates that a\n partner doesn't need support from an Amazon Web Services sales representative,\n and the partner solely manages the opportunity. It's possible to request\n coselling support on these opportunities at any stage during their lifecycle.\n Also known as, for-visibility-only (FVO) opportunity.

    \n
  • \n
" + "smithy.api#documentation": "

Identifies the type of support the partner needs from Amazon Web Services.

\n

Valid values:

\n
    \n
  • \n

    Cosell—Architectural Validation: Confirmation from Amazon Web Services that the\n partner's proposed solution architecture is aligned with Amazon Web Services best\n practices and poses minimal architectural risks.

    \n
  • \n
  • \n

    Cosell—Business Presentation: Request Amazon Web Services seller's\n participation in a joint customer presentation.

    \n
  • \n
  • \n

    Cosell—Competitive Information: Access to Amazon Web Services competitive\n resources and support for the partner's proposed solution.

    \n
  • \n
  • \n

    Cosell—Pricing Assistance: Connect with an Amazon Web Services seller for\n support situations where a partner may be receiving an upfront discount on a\n service (for example: EDP deals).

    \n
  • \n
  • \n

    Cosell—Technical Consultation: Connect with an Amazon Web Services Solutions\n Architect to address the partner's questions about the proposed solution.

    \n
  • \n
  • \n

    Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different\n cost savings of proposed solutions on Amazon Web Services versus on-premises or a\n traditional hosting environment.

    \n
  • \n
  • \n

    Cosell—Deal Support: Request Amazon Web Services seller's support to progress\n the opportunity (for example: joint customer call, strategic\n positioning).

    \n
  • \n
  • \n

    Cosell—Support for Public Tender/RFx: Opportunity related to the public sector\n where the partner needs Amazon Web Services RFx support.

    \n
  • \n
" } }, "NationalSecurity": { @@ -6861,7 +6884,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to retrieving resource snapshot job details in AWS Partner Central" }, - "smithy.api#documentation": "

\n Use this action to retrieves information about a specific resource snapshot\n job.\n

", + "smithy.api#documentation": "

Use this action to retrieves information about a specific resource snapshot\n job.

", "smithy.api#http": { "method": "POST", "uri": "/GetResourceSnapshotJob", @@ -6876,14 +6899,14 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog related to the request. Valid values are:\n

\n
    \n
  • \n

    AWS: Retrieves the snapshot job from the production AWS environment.

    \n
  • \n
  • \n

    Sandbox: Retrieves the snapshot job from a sandbox environment used for\n testing or development purposes.

    \n
  • \n
", + "smithy.api#documentation": "

Specifies the catalog related to the request. Valid values are:

\n
    \n
  • \n

    AWS: Retrieves the snapshot job from the production AWS environment.

    \n
  • \n
  • \n

    Sandbox: Retrieves the snapshot job from a sandbox environment used for\n testing or development purposes.

    \n
  • \n
", "smithy.api#required": {} } }, "ResourceSnapshotJobIdentifier": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier of the resource snapshot job to be retrieved. This identifier is\n crucial for pinpointing the specific job you want to query.\n

", + "smithy.api#documentation": "

The unique identifier of the resource snapshot job to be retrieved. This identifier is\n crucial for pinpointing the specific job you want to query.

", "smithy.api#required": {}, "smithy.api#resourceIdentifier": "Identifier" } @@ -6899,74 +6922,74 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n The catalog in which the snapshot job was created. This will match the catalog\n specified in the request.\n

", + "smithy.api#documentation": "

The catalog in which the snapshot job was created. This will match the Catalog\n specified in the request.

", "smithy.api#required": {} } }, "Id": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier of the snapshot job. This matches the\n ResourceSnapshotJobIdentifier provided in the request. \n

" + "smithy.api#documentation": "

The unique identifier of the snapshot job. This matches the\n ResourceSnapshotJobIdentifier provided in the request.

" } }, "Arn": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobArn", "traits": { - "smithy.api#documentation": "

\n he Amazon Resource Name (ARN) of the snapshot job. This globally unique identifier\n can be used for resource-specific operations across AWS services. \n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the snapshot job. This globally unique identifier\n can be used for resource-specific operations across AWS services.

" } }, "EngagementId": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the engagement associated with this snapshot job. This links the job\n to a specific engagement context. \n

" + "smithy.api#documentation": "

The identifier of the engagement associated with this snapshot job. This links the job\n to a specific engagement context.

" } }, "ResourceType": { "target": "com.amazonaws.partnercentralselling#ResourceType", "traits": { - "smithy.api#documentation": "

\n The type of resource being snapshotted. This would have Opportunity as a value as it\n is dependent on the supported resource type. \n

" + "smithy.api#documentation": "

The type of resource being snapshotted. This would have \"Opportunity\" as a value as it\n is dependent on the supported resource type.

" } }, "ResourceId": { "target": "com.amazonaws.partnercentralselling#ResourceIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the specific resource being snapshotted. The format may vary\n depending on the ResourceType.\n

" + "smithy.api#documentation": "

The identifier of the specific resource being snapshotted. The format might vary\n depending on the ResourceType.

" } }, "ResourceArn": { "target": "com.amazonaws.partnercentralselling#ResourceArn", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the resource being snapshotted. This provides a\n globally unique identifier for the resource across AWS.\n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource being snapshotted. This provides a\n globally unique identifier for the resource across AWS.

" } }, "ResourceSnapshotTemplateName": { "target": "com.amazonaws.partnercentralselling#ResourceTemplateName", "traits": { - "smithy.api#documentation": "

\n The name of the template used for creating the snapshot. This is the same as the\n template name. It defines the structure and content of the snapshot.\n

" + "smithy.api#documentation": "

The name of the template used for creating the snapshot. This is the same as the\n template name. It defines the structure and content of the snapshot.

" } }, "CreatedAt": { "target": "com.amazonaws.partnercentralselling#DateTime", "traits": { - "smithy.api#documentation": "

\n The date and time when the snapshot job was created, in ISO 8601 format (UTC).\n Example: \"2023-05-01T20:37:46Z\"\n

" + "smithy.api#documentation": "

The date and time when the snapshot job was created in ISO 8601 format (UTC).\n Example: \"2023-05-01T20:37:46Z\"

" } }, "Status": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobStatus", "traits": { - "smithy.api#documentation": "

\n The current status of the snapshot job. Valid values:\n

\n
    \n
  • \n

    STOPPED: The job is not currently running.

    \n
  • \n
  • \n

    RUNNING: The job is actively executing.

    \n
  • \n
" + "smithy.api#documentation": "

The current status of the snapshot job. Valid values:

\n
    \n
  • \n

    STOPPED: The job is not currently running.

    \n
  • \n
  • \n

    RUNNING: The job is actively executing.

    \n
  • \n
" } }, "LastSuccessfulExecutionDate": { "target": "com.amazonaws.partnercentralselling#DateTime", "traits": { - "smithy.api#documentation": "

\n The date and time of the last successful execution of the job, in ISO 8601 format\n (UTC). Example: \"2023-05-01T20:37:46Z\"\n

" + "smithy.api#documentation": "

The date and time of the last successful execution of the job, in ISO 8601 format\n (UTC). Example: \"2023-05-01T20:37:46Z\"

" } }, "LastFailure": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n If the job has encountered any failures, this field contains the error message from\n the most recent failure. This can be useful for troubleshooting issues with the job.\n

" + "smithy.api#documentation": "

If the job has encountered any failures, this field contains the error message from\n the most recent failure. This can be useful for troubleshooting issues with the job.\n

" } } }, @@ -7036,7 +7059,7 @@ "Arn": { "target": "com.amazonaws.partnercentralselling#ResourceArn", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the snapshot. This globally unique identifier can be\n used for resource-specific operations across AWS services.\n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies the resource snapshot.

" } }, "CreatedBy": { @@ -7060,25 +7083,25 @@ "ResourceType": { "target": "com.amazonaws.partnercentralselling#ResourceType", "traits": { - "smithy.api#documentation": "

\n The type of the resource that was snapshotted. Matches the ResourceType specified in\n the request.\n

" + "smithy.api#documentation": "

The type of the resource that was snapshotted. Matches the ResourceType specified in\n the request.

" } }, "ResourceId": { "target": "com.amazonaws.partnercentralselling#ResourceIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the specific resource that was snapshotted. Matches the\n ResourceIdentifier specified in the request.\n

" + "smithy.api#documentation": "

The identifier of the specific resource that was snapshotted. Matches the\n ResourceIdentifier specified in the request.

" } }, "ResourceSnapshotTemplateName": { "target": "com.amazonaws.partnercentralselling#ResourceTemplateName", "traits": { - "smithy.api#documentation": "

\n The name of the view used for this snapshot. This is the same as the template\n name.\n

" + "smithy.api#documentation": "

The name of the view used for this snapshot. This is the same as the template\n name.

" } }, "Revision": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotRevision", "traits": { - "smithy.api#documentation": "

\n The revision number of this snapshot. This is a positive integer that is sequential\n and unique within the context of a resource view.\n

" + "smithy.api#documentation": "

The revision number of this snapshot. This is a positive integer that is sequential\n and unique within the context of a resource view.

" } }, "Payload": { @@ -7118,7 +7141,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to retrieving system settings settings in AWS Partner Central" }, - "smithy.api#documentation": "

Retrieves the currently set system settings, which include the IAM Role used for resource snapshot jobs.

", + "smithy.api#documentation": "

Retrieves the currently set system settings, which include the IAM Role used for\n resource snapshot jobs.

", "smithy.api#http": { "method": "POST", "uri": "/GetSellingSystemSettings", @@ -7133,7 +7156,7 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

Specifies the catalog in which the settings are defined. Acceptable values include\n AWS for production and Sandbox for testing\n environments.

", + "smithy.api#documentation": "

Specifies the catalog in which the settings are defined. Acceptable values include\n AWS for production and Sandbox for testing\n environments.

", "smithy.api#required": {} } } @@ -7148,7 +7171,7 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

Specifies the catalog in which the settings are defined. Acceptable values include\n AWS for production and Sandbox for testing\n environments.

", + "smithy.api#documentation": "

Specifies the catalog in which the settings are defined. Acceptable values include\n AWS for production and Sandbox for testing\n environments.

", "smithy.api#required": {} } }, @@ -7355,7 +7378,7 @@ "Message": { "target": "com.amazonaws.partnercentralselling#InvitationMessage", "traits": { - "smithy.api#documentation": "

\n A message accompanying the invitation.\n

", + "smithy.api#documentation": "

A message accompanying the invitation.

", "smithy.api#required": {} } }, @@ -7373,7 +7396,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n The Invitation structure represents an invitation exchanged between partners and AWS.\n It includes a message, receiver information, and a payload providing context for the\n invitation.\n

" + "smithy.api#documentation": "

The Invitation structure represents an invitation exchanged between partners and AWS.\n It includes a message, receiver information, and a payload providing context for the\n invitation.

" } }, "com.amazonaws.partnercentralselling#InvitationMessage": { @@ -7558,25 +7581,25 @@ "TargetCloseDate": { "target": "com.amazonaws.partnercentralselling#Date", "traits": { - "smithy.api#documentation": "

\n The projected launch date of the opportunity shared through a snapshot.\n

" + "smithy.api#documentation": "

The projected launch date of the opportunity shared through a snapshot.

" } }, "ReviewStatus": { "target": "com.amazonaws.partnercentralselling#ReviewStatus", "traits": { - "smithy.api#documentation": "

\n Defines the approval status of the opportunity shared through a snapshot.\n

" + "smithy.api#documentation": "

Defines the approval status of the opportunity shared through a snapshot.

" } }, "Stage": { "target": "com.amazonaws.partnercentralselling#Stage", "traits": { - "smithy.api#documentation": "

\n Defines the current stage of the opportunity shared through a snapshot.\n

" + "smithy.api#documentation": "

Defines the current stage of the opportunity shared through a snapshot.

" } }, "NextSteps": { "target": "com.amazonaws.partnercentralselling#PiiString", "traits": { - "smithy.api#documentation": "

\n Describes the next steps for the opportunity shared through a snapshot.\n

", + "smithy.api#documentation": "

Describes the next steps for the opportunity shared through a snapshot.

", "smithy.api#length": { "max": 255 } @@ -7584,7 +7607,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Provides the lifecycle view of an opportunity resource shared through a\n snapshot.\n

" + "smithy.api#documentation": "

Provides the lifecycle view of an opportunity resource shared through a snapshot.\n

" } }, "com.amazonaws.partnercentralselling#LifeCycleSummary": { @@ -7652,60 +7675,60 @@ "TaskId": { "target": "com.amazonaws.partnercentralselling#TaskIdentifier", "traits": { - "smithy.api#documentation": "

\n Unique identifier of the task.\n

" + "smithy.api#documentation": "

Unique identifier of the task.

" } }, "TaskArn": { "target": "com.amazonaws.partnercentralselling#TaskArn", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that uniquely identifies the task.\n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies the task.

" } }, "StartTime": { "target": "com.amazonaws.partnercentralselling#DateTime", "traits": { - "smithy.api#documentation": "

\n Task start timestamp.\n

" + "smithy.api#documentation": "

Task start timestamp.

" } }, "TaskStatus": { "target": "com.amazonaws.partnercentralselling#TaskStatus", "traits": { - "smithy.api#documentation": "

\n Status of the task.\n

" + "smithy.api#documentation": "

Status of the task.

" } }, "Message": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n Detailed message describing the failure and possible recovery steps.\n

" + "smithy.api#documentation": "

Detailed message describing the failure and possible recovery steps.

" } }, "ReasonCode": { "target": "com.amazonaws.partnercentralselling#ReasonCode", "traits": { - "smithy.api#documentation": "

\n A code pointing to the specific reason for the failure.\n

" + "smithy.api#documentation": "

A code pointing to the specific reason for the failure.

" } }, "OpportunityId": { "target": "com.amazonaws.partnercentralselling#OpportunityIdentifier", "traits": { - "smithy.api#documentation": "

\n Unique identifier of opportunity that was created.\n

" + "smithy.api#documentation": "

Unique identifier of opportunity that was created.

" } }, "ResourceSnapshotJobId": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobIdentifier", "traits": { - "smithy.api#documentation": "

\n Unique identifier of the resource snapshot job that was created.\n

" + "smithy.api#documentation": "

Unique identifier of the resource snapshot job that was created.

" } }, "EngagementInvitationId": { "target": "com.amazonaws.partnercentralselling#EngagementInvitationIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier of the engagement invitation that was accepted.\n

" + "smithy.api#documentation": "

The unique identifier of the engagement invitation that was accepted.

" } } }, "traits": { - "smithy.api#documentation": "

\n Specifies a subset of fields associated with tasks related to accepting an engagement\n invitation.\n

" + "smithy.api#documentation": "

Specifies a subset of fields associated with tasks related to accepting an engagement\n invitation.

" } }, "com.amazonaws.partnercentralselling#ListEngagementByAcceptingInvitationTasks": { @@ -7737,7 +7760,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to listing engagements by accepting invitation tasks in AWS Partner\nCentral" }, - "smithy.api#documentation": "

\n Lists all in-progress, completed, or failed StartEngagementByAcceptingInvitationTask\n tasks that were initiated by the caller's account. \n

", + "smithy.api#documentation": "

Lists all in-progress, completed, or failed StartEngagementByAcceptingInvitationTask\n tasks that were initiated by the caller's account.

", "smithy.api#http": { "method": "POST", "uri": "/ListEngagementByAcceptingInvitationTasks", @@ -7758,7 +7781,7 @@ "MaxResults": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

\n Use this parameter to control the number of items returned in each request, which can\n be useful for performance tuning and managing large result sets.\n

", + "smithy.api#documentation": "

Use this parameter to control the number of items returned in each request, which can\n be useful for performance tuning and managing large result sets.

", "smithy.api#range": { "min": 1, "max": 1000 @@ -7768,7 +7791,7 @@ "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n Use this parameter for pagination when the result set spans multiple pages. This value\n is obtained from the NextToken field in the response of a previous call to this API.\n

", + "smithy.api#documentation": "

Use this parameter for pagination when the result set spans multiple pages. This\n value is obtained from the NextToken field in the response of a previous call to this\n API.

", "smithy.api#length": { "min": 1, "max": 2048 @@ -7778,38 +7801,38 @@ "Sort": { "target": "com.amazonaws.partnercentralselling#ListTasksSortBase", "traits": { - "smithy.api#documentation": "

\n Specifies the sorting criteria for the returned results. This allows you to order the\n tasks based on specific attributes.\n

" + "smithy.api#documentation": "

Specifies the sorting criteria for the returned results. This allows you to order the\n tasks based on specific attributes.

" } }, "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog related to the request. Valid values are:\n

\n
    \n
  • \n

    AWS: Retrieves the request from the production AWS environment.

    \n
  • \n
  • \n

    Sandbox: Retrieves the request from a sandbox environment used for testing or\n development purposes.

    \n
  • \n
", + "smithy.api#documentation": "

Specifies the catalog related to the request. Valid values are:

\n
    \n
  • \n

    AWS: Retrieves the request from the production AWS environment.

    \n
  • \n
  • \n

    Sandbox: Retrieves the request from a sandbox environment used for testing or\n development purposes.

    \n
  • \n
", "smithy.api#required": {} } }, "TaskStatus": { "target": "com.amazonaws.partnercentralselling#TaskStatuses", "traits": { - "smithy.api#documentation": "

\n Filters the tasks based on their current status. This allows you to focus on tasks in\n specific states.\n

" + "smithy.api#documentation": "

Filters the tasks based on their current status. This allows you to focus on tasks in\n specific states.

" } }, "OpportunityIdentifier": { "target": "com.amazonaws.partnercentralselling#OpportunityIdentifiers", "traits": { - "smithy.api#documentation": "

\n Filters tasks by the identifiers of the opportunities they created or are associated\n with.\n

" + "smithy.api#documentation": "

Filters tasks by the identifiers of the opportunities they created or are associated\n with.

" } }, "EngagementInvitationIdentifier": { "target": "com.amazonaws.partnercentralselling#EngagementInvitationIdentifiers", "traits": { - "smithy.api#documentation": "

\n Filters tasks by the identifiers of the engagement invitations they are\n processing.\n

" + "smithy.api#documentation": "

Filters tasks by the identifiers of the engagement invitations they are processing.\n

" } }, "TaskIdentifier": { "target": "com.amazonaws.partnercentralselling#TaskIdentifiers", "traits": { - "smithy.api#documentation": "

\n Filters tasks by their unique identifiers. Use this when you want to retrieve\n information about specific tasks. \n

" + "smithy.api#documentation": "

Filters tasks by their unique identifiers. Use this when you want to retrieve\n information about specific tasks.

" } } }, @@ -7823,13 +7846,13 @@ "TaskSummaries": { "target": "com.amazonaws.partnercentralselling#ListEngagementByAcceptingInvitationTaskSummaries", "traits": { - "smithy.api#documentation": "

\n An array of EngagementByAcceptingInvitationTaskSummary objects, each representing a\n task that matches the specified filters. The array may be empty if no tasks match the\n criteria.\n

" + "smithy.api#documentation": "

An array of EngagementByAcceptingInvitationTaskSummary objects, each\n representing a task that matches the specified filters. The array may be empty if no\n tasks match the criteria.

" } }, "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n A token used for pagination to retrieve the next page of results.If there are more\n results available, this field will contain a token that can be used in a subsequent API\n call to retrieve the next page. If there are no more results, this field will be null or\n an empty string.\n

" + "smithy.api#documentation": "

A token used for pagination to retrieve the next page of results.If there are more\n results available, this field will contain a token that can be used in a subsequent API\n call to retrieve the next page. If there are no more results, this field will be null or\n an empty string.
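Both doc strings above describe the usual AWS pagination contract: pass the returned NextToken back until it comes back null or empty. A minimal consumer sketch in Swift, assuming Soto generates a `PartnerCentralSelling` service client and the request/summary types named in this model (the member-wise initializer labels are assumptions):

```swift
import SotoPartnerCentralSelling

// Hypothetical pagination helper: keeps fetching pages until NextToken
// comes back nil or empty, per the documentation above.
func collectAllTaskSummaries(
    _ selling: PartnerCentralSelling
) async throws -> [ListEngagementByAcceptingInvitationTaskSummary] {
    var summaries: [ListEngagementByAcceptingInvitationTaskSummary] = []
    var nextToken: String?
    repeat {
        let page = try await selling.listEngagementByAcceptingInvitationTasks(
            .init(catalog: "AWS", nextToken: nextToken)
        )
        summaries.append(contentsOf: page.taskSummaries ?? [])
        nextToken = page.nextToken
    } while !(nextToken ?? "").isEmpty
    return summaries
}
```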

" } } }, @@ -7849,66 +7872,66 @@ "TaskId": { "target": "com.amazonaws.partnercentralselling#TaskIdentifier", "traits": { - "smithy.api#documentation": "

\n A unique identifier for a specific task.\n

" + "smithy.api#documentation": "

A unique identifier for a specific task.

" } }, "TaskArn": { "target": "com.amazonaws.partnercentralselling#TaskArn", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) uniquely identifying this task within AWS. This ARN can\n be used for referencing the task in other AWS services or APIs.\n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) uniquely identifying this task within AWS. This ARN\n can be used for referencing the task in other AWS services or APIs.

" } }, "StartTime": { "target": "com.amazonaws.partnercentralselling#DateTime", "traits": { - "smithy.api#documentation": "

\n The timestamp indicating when the task was initiated, in RFC 3339 5.6 date-time\n format.\n

" + "smithy.api#documentation": "

The timestamp indicating when the task was initiated, in RFC 3339 5.6 date-time\n format.

" } }, "TaskStatus": { "target": "com.amazonaws.partnercentralselling#TaskStatus", "traits": { - "smithy.api#documentation": "

\n The current status of the task.\n

" + "smithy.api#documentation": "

The current status of the task.

" } }, "Message": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n A detailed message providing additional information about the task, especially useful\n in case of failures. This field may contain error details or other relevant information\n about the task's execution\n

" + "smithy.api#documentation": "

A detailed message providing additional information about the task, especially useful\n in case of failures. This field may contain error details or other relevant information\n about the task's execution

" } }, "ReasonCode": { "target": "com.amazonaws.partnercentralselling#ReasonCode", "traits": { - "smithy.api#documentation": "

\n A code indicating the specific reason for a task failure. This field is populated when\n the task status is FAILED and provides a categorized reason for the failure.\n

" + "smithy.api#documentation": "

A code indicating the specific reason for a task failure. This field is populated\n when the task status is FAILED and provides a categorized reason for the failure.\n

" } }, "OpportunityId": { "target": "com.amazonaws.partnercentralselling#OpportunityIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier of the original Opportunity from which the Engagement is being\n created. This field helps track the source of the Engagement creation task. \n

" + "smithy.api#documentation": "

The unique identifier of the original Opportunity from which the Engagement is being\n created. This field helps track the source of the Engagement creation task.

" } }, "ResourceSnapshotJobId": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the resource snapshot job associated with this task, if a snapshot\n was created as part of the Engagement creation process.\n

" + "smithy.api#documentation": "

The identifier of the resource snapshot job associated with this task, if a snapshot\n was created as part of the Engagement creation process.

" } }, "EngagementId": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier of the engagement created as a result of the task. This field is\n populated when the task is completed successfully. \n

" + "smithy.api#documentation": "

The unique identifier of the engagement created as a result of the task. This field\n is populated when the task is completed successfully.

" } }, "EngagementInvitationId": { "target": "com.amazonaws.partnercentralselling#EngagementInvitationIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier of the engagement identifier created as a result of the task. This field is\n populated when the task is completed successfully.\n

" + "smithy.api#documentation": "

The unique identifier of the Engagement Invitation.

" } } }, "traits": { - "smithy.api#documentation": "

\n Provides a summary of a task related to creating an engagement from an opportunity.\n This structure contains key information about the task's status, associated identifiers,\n and any failure details.\n

" + "smithy.api#documentation": "

Provides a summary of a task related to creating an engagement from an opportunity.\n This structure contains key information about the task's status, associated identifiers,\n and any failure details.

" } }, "com.amazonaws.partnercentralselling#ListEngagementFromOpportunityTasks": { @@ -7940,7 +7963,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to listing engagements from opportunity tasks in AWS Partner Central" }, - "smithy.api#documentation": "

\n Lists all in-progress, completed, or failed EngagementFromOpportunity tasks that were\n initiated by the caller's account.\n

", + "smithy.api#documentation": "

Lists all in-progress, completed, or failed EngagementFromOpportunity\n tasks that were initiated by the caller's account.

", "smithy.api#http": { "method": "POST", "uri": "/ListEngagementFromOpportunityTasks", @@ -7961,7 +7984,7 @@ "MaxResults": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

\n Specifies the maximum number of results to return in a single page of the response.Use\n this parameter to control the number of items returned in each request, which can be\n useful for performance tuning and managing large result sets. \n

", + "smithy.api#documentation": "

Specifies the maximum number of results to return in a single page of the\n response.Use this parameter to control the number of items returned in each request,\n which can be useful for performance tuning and managing large result sets.

", "smithy.api#range": { "min": 1, "max": 1000 @@ -7971,7 +7994,7 @@ "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The token for requesting the next page of results. This value is obtained from the\n NextToken field in the response of a previous call to this API. Use this parameter for\n pagination when the result set spans multiple pages.\n

", + "smithy.api#documentation": "

The token for requesting the next page of results. This value is obtained from the\n NextToken field in the response of a previous call to this API. Use this parameter for\n pagination when the result set spans multiple pages.

", "smithy.api#length": { "min": 1, "max": 2048 @@ -7981,38 +8004,38 @@ "Sort": { "target": "com.amazonaws.partnercentralselling#ListTasksSortBase", "traits": { - "smithy.api#documentation": "

\n Specifies the sorting criteria for the returned results. This allows you to order the\n tasks based on specific attributes. \n

" + "smithy.api#documentation": "

Specifies the sorting criteria for the returned results. This allows you to order the\n tasks based on specific attributes.

" } }, "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog related to the request. Valid values are:\n

\n
    \n
  • \n

    AWS: Retrieves the request from the production AWS environment.

    \n
  • \n
  • \n

    Sandbox: Retrieves the request from a sandbox environment used for testing or\n development purposes.

    \n
  • \n
", + "smithy.api#documentation": "

Specifies the catalog related to the request. Valid values are:

\n
    \n
  • \n

    AWS: Retrieves the request from the production AWS environment.

    \n
  • \n
  • \n

    Sandbox: Retrieves the request from a sandbox environment used for testing or\n development purposes.

    \n
  • \n
", "smithy.api#required": {} } }, "TaskStatus": { "target": "com.amazonaws.partnercentralselling#TaskStatuses", "traits": { - "smithy.api#documentation": "

\n Filters the tasks based on their current status. This allows you to focus on tasks in\n specific states.\n

" + "smithy.api#documentation": "

Filters the tasks based on their current status. This allows you to focus on tasks in\n specific states.

" } }, "TaskIdentifier": { "target": "com.amazonaws.partnercentralselling#TaskIdentifiers", "traits": { - "smithy.api#documentation": "

\n Filters tasks by their unique identifiers. Use this when you want to retrieve\n information about specific tasks. \n

" + "smithy.api#documentation": "

Filters tasks by their unique identifiers. Use this when you want to retrieve\n information about specific tasks.

" } }, "OpportunityIdentifier": { "target": "com.amazonaws.partnercentralselling#OpportunityIdentifiers", "traits": { - "smithy.api#documentation": "

\n The identifier of the original opportunity associated with this task.\n

" + "smithy.api#documentation": "

The identifier of the original opportunity associated with this task.

" } }, "EngagementIdentifier": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifiers", "traits": { - "smithy.api#documentation": "

\n Filters tasks by the identifiers of the engagements they created or are associated\n with.\n

" + "smithy.api#documentation": "

Filters tasks by the identifiers of the engagements they created or are associated\n with.

" } } }, @@ -8026,13 +8049,13 @@ "TaskSummaries": { "target": "com.amazonaws.partnercentralselling#ListEngagementFromOpportunityTaskSummaries", "traits": { - "smithy.api#documentation": "

\n TaskSummaries An array of TaskSummary objects containing details about each\n task.\n

" + "smithy.api#documentation": "

TaskSummaries An array of TaskSummary objects containing details about each task.\n

" } }, "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n A token used for pagination to retrieve the next page of results. If there are more\n results available, this field will contain a token that can be used in a subsequent API\n call to retrieve the next page. If there are no more results, this field will be null or\n an empty string.\n

" + "smithy.api#documentation": "

A token used for pagination to retrieve the next page of results. If there are more\n results available, this field will contain a token that can be used in a subsequent API\n call to retrieve the next page. If there are no more results, this field will be null or\n an empty string.

" } } }, @@ -8131,19 +8154,19 @@ "Status": { "target": "com.amazonaws.partnercentralselling#InvitationStatusList", "traits": { - "smithy.api#documentation": "

\n Status values to filter the invitations.\n

" + "smithy.api#documentation": "

Status values to filter the invitations.

" } }, "EngagementIdentifier": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifiers", "traits": { - "smithy.api#documentation": "

\n Retrieves a list of engagement invitation summaries based on specified filters. The\n ListEngagementInvitations operation allows you to view all invitations that you have\n sent or received. You must specify the ParticipantType to filter invitations where you\n are either the SENDER or the RECEIVER. Invitations will automatically expire if not\n accepted within 15 days.\n

" + "smithy.api#documentation": "

Retrieves a list of engagement invitation summaries based on specified filters. The\n ListEngagementInvitations operation allows you to view all invitations that you have\n sent or received. You must specify the ParticipantType to filter invitations where you\n are either the SENDER or the RECEIVER. Invitations will automatically expire if not\n accepted within 15 days.

" } }, "SenderAwsAccountId": { "target": "com.amazonaws.partnercentralselling#AwsAccountIdOrAliasList", "traits": { - "smithy.api#documentation": "

\n List of sender AWS account IDs to filter the invitations.\n

" + "smithy.api#documentation": "

List of sender AWS account IDs to filter the invitations.

" } } }, @@ -8200,7 +8223,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to listing engagement members in AWS Partner Central" }, - "smithy.api#documentation": "

\n Retrieves the details of member partners in an engagement. This operation can only be\n invoked by members of the engagement. The ListEngagementMembers operation allows you to\n fetch information about the members of a specific engagement. This action is restricted\n to members of the engagement being queried.\n

", + "smithy.api#documentation": "

Retrieves the details of member partners in an Engagement. This operation can only be\n invoked by members of the Engagement. The ListEngagementMembers operation\n allows you to fetch information about the members of a specific Engagement. This action\n is restricted to members of the Engagement being queried.

", "smithy.api#http": { "method": "POST", "uri": "/ListEngagementMembers", @@ -8221,14 +8244,14 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n The catalog related to the request. \n

", + "smithy.api#documentation": "

The catalog related to the request.

", "smithy.api#required": {} } }, "Identifier": { "target": "com.amazonaws.partnercentralselling#EngagementArnOrIdentifier", "traits": { - "smithy.api#documentation": "

\n Identifier of the engagement record to retrieve members from.\n

", + "smithy.api#documentation": "

Identifier of the Engagement record to retrieve members from.

", "smithy.api#required": {} } }, @@ -8236,13 +8259,13 @@ "target": "com.amazonaws.partnercentralselling#MemberPageSize", "traits": { "smithy.api#default": 5, - "smithy.api#documentation": "

\n The maximum number of results to return in a single call.\n

" + "smithy.api#documentation": "

The maximum number of results to return in a single call.

" } }, "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The token for the next set of results.\n

" + "smithy.api#documentation": "

The token for the next set of results.

" } } }, @@ -8256,14 +8279,14 @@ "EngagementMemberList": { "target": "com.amazonaws.partnercentralselling#EngagementMembers", "traits": { - "smithy.api#documentation": "

\nProvides a list of engagement members.\n

", + "smithy.api#documentation": "

Provides a list of engagement members.

", "smithy.api#required": {} } }, "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n A pagination token used to retrieve the next set of results. If there are more results\n available than can be returned in a single response, this token will be present. Use\n this token in a subsequent request to retrieve the next page of results. If there are no\n more results, this value will be null.\n

" + "smithy.api#documentation": "

A pagination token used to retrieve the next set of results. If there are more results\n available than can be returned in a single response, this token will be present. Use\n this token in a subsequent request to retrieve the next page of results. If there are no\n more results, this value will be null.
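Taken together, the request and response shapes above suggest a small lookup helper. A hypothetical sketch against a generated Soto client; the engagement identifier and the member-wise initializer labels are assumptions:

```swift
import SotoPartnerCentralSelling

// Hypothetical call sketch: fetch the member partners of one Engagement.
// Shape and member names mirror the Smithy model above.
func printEngagementMembers(
    _ selling: PartnerCentralSelling,
    engagementId: String
) async throws {
    let response = try await selling.listEngagementMembers(
        .init(catalog: "AWS", identifier: engagementId, maxResults: 5)
    )
    for member in response.engagementMemberList {
        print(member) // one EngagementMember per partner in the engagement
    }
}
```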

" } } }, @@ -8300,7 +8323,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to listing engagement resource associations in AWS Partner Central" }, - "smithy.api#documentation": "

\n Lists the associations between resources and engagements where the caller is a member\n and has at least one snapshot in the engagement. \n

", + "smithy.api#documentation": "

Lists the associations between resources and engagements where the caller is a member\n and has at least one snapshot in the engagement.

", "smithy.api#http": { "method": "POST", "uri": "/ListEngagementResourceAssociations", @@ -8321,7 +8344,7 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog in which to search for engagement-resource associations.\n

", + "smithy.api#documentation": "

Specifies the catalog in which to search for engagement-resource associations. Valid\n Values: \"AWS\" or \"Sandbox\"

\n
    \n
  • \n

    \n AWS for production environments.

    \n
  • \n
  • \n

    \n Sandbox for testing and development purposes.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -8329,7 +8352,7 @@ "target": "com.amazonaws.partnercentralselling#PageSize", "traits": { "smithy.api#default": 100, - "smithy.api#documentation": "

\n Limits the number of results returned in a single call. Use this to control the number\n of results returned, especially useful for pagination.\n

", + "smithy.api#documentation": "

Limits the number of results returned in a single call. Use this to control the number\n of results returned, especially useful for pagination.

", "smithy.api#range": { "min": 1, "max": 1000 @@ -8339,31 +8362,31 @@ "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n A token used for pagination of results. Include this token in subsequent requests to\n retrieve the next set of results.\n

" + "smithy.api#documentation": "

A token used for pagination of results. Include this token in subsequent requests to\n retrieve the next set of results.

" } }, "EngagementIdentifier": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n Filters the results to include only associations related to the specified engagement.\n Use this when you want to find all resources associated with a specific engagement.\n

" + "smithy.api#documentation": "

Filters the results to include only associations related to the specified engagement.\n Use this when you want to find all resources associated with a specific\n engagement.

" } }, "ResourceType": { "target": "com.amazonaws.partnercentralselling#ResourceType", "traits": { - "smithy.api#documentation": "

\n Filters the results to include only associations with resources of the specified type.\n

" + "smithy.api#documentation": "

Filters the results to include only associations with resources of the specified\n type.

" } }, "ResourceIdentifier": { "target": "com.amazonaws.partnercentralselling#ResourceIdentifier", "traits": { - "smithy.api#documentation": "

\n Filters the results to include only associations with the specified resource. Varies\n depending on the resource type. Use this when you want to find all engagements\n associated with a specific resource.\n

" + "smithy.api#documentation": "

Filters the results to include only associations with the specified resource. Varies\n depending on the resource type. Use this when you want to find all engagements\n associated with a specific resource.

" } }, "CreatedBy": { "target": "com.amazonaws.partnercentralselling#AwsAccount", "traits": { - "smithy.api#documentation": "

\n Filters the results to include only associations with resources owned by the specified\n AWS account. Use this when you want to find associations related to resources owned by a\n particular account.\n

" + "smithy.api#documentation": "

Filters the response to include only snapshots of resources owned by the specified\n AWS account ID. Use this when you want to find associations related to resources owned\n by a particular account.

" } } }, @@ -8377,14 +8400,14 @@ "EngagementResourceAssociationSummaries": { "target": "com.amazonaws.partnercentralselling#EngagementResourceAssociationSummaryList", "traits": { - "smithy.api#documentation": "

\n A list of engagement-resource association summaries.\n

", + "smithy.api#documentation": "

A list of engagement-resource association summaries.

", "smithy.api#required": {} } }, "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n A token to retrieve the next set of results. Use this token in a subsequent request to\n retrieve additional results if the response was truncated.\n

" + "smithy.api#documentation": "

A token to retrieve the next set of results. Use this token in a subsequent request\n to retrieve additional results if the response was truncated.

" } } }, @@ -8421,7 +8444,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to listing engagements in AWS Partner Central" }, - "smithy.api#documentation": "

\n This action allows users to retrieve a list of engagement records from Partner\n Central. This action can be used to manage and track various engagements across\n different stages of the partner selling process.\n

", + "smithy.api#documentation": "

This action allows users to retrieve a list of Engagement records from Partner\n Central. This action can be used to manage and track various engagements across\n different stages of the partner selling process.

", "smithy.api#http": { "method": "POST", "uri": "/ListEngagements", @@ -8442,45 +8465,45 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog related to the request.\n

", + "smithy.api#documentation": "

Specifies the catalog related to the request.

", "smithy.api#required": {} } }, "CreatedBy": { "target": "com.amazonaws.partnercentralselling#AwsAccountList", "traits": { - "smithy.api#documentation": "

\n A list of AWS account IDs. When specified, the response includes engagements created\n by these accounts. This filter is useful for finding engagements created by specific\n team members.\n

" + "smithy.api#documentation": "

A list of AWS account IDs. When specified, the response includes engagements created\n by these accounts. This filter is useful for finding engagements created by specific\n team members.

" } }, "ExcludeCreatedBy": { "target": "com.amazonaws.partnercentralselling#AwsAccountList", "traits": { - "smithy.api#documentation": "

\n An array of strings representing AWS Account IDs. Use this to exclude engagements\n created by specific users.\n

" + "smithy.api#documentation": "

An array of strings representing AWS Account IDs. Use this to exclude engagements\n created by specific users.

" } }, "Sort": { "target": "com.amazonaws.partnercentralselling#EngagementSort", "traits": { - "smithy.api#documentation": "

\n An object that specifies the sort order of the results.\n

" + "smithy.api#documentation": "

\n An object that specifies the sort order of the results.\n

" } }, "MaxResults": { "target": "com.amazonaws.partnercentralselling#EngagementPageSize", "traits": { "smithy.api#default": 20, - "smithy.api#documentation": "

\n The maximum number of results to return in a single call.\n

" + "smithy.api#documentation": "

The maximum number of results to return in a single call.

" } }, "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The token for the next set of results. This value is returned from a previous\n call.\n

" + "smithy.api#documentation": "

The token for the next set of results. This value is returned from a previous\n call.

" } }, "EngagementIdentifier": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifiers", "traits": { - "smithy.api#documentation": "

\n An array of strings representing engagement identifiers to retrieve.\n

" + "smithy.api#documentation": "

An array of strings representing engagement identifiers to retrieve.

" } } }, @@ -8494,14 +8517,14 @@ "EngagementSummaryList": { "target": "com.amazonaws.partnercentralselling#EngagementSummaryList", "traits": { - "smithy.api#documentation": "

\n An array of engagement summary objects.\n

", + "smithy.api#documentation": "

An array of engagement summary objects.

", "smithy.api#required": {} } }, "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The token to retrieve the next set of results. This field will be null if there are no\n more results.\n

" + "smithy.api#documentation": "

The token to retrieve the next set of results. This field will be null if there are no\n more results.

" } } }, @@ -8564,6 +8587,10 @@ "failure": { "errorId": "com.amazonaws.partnercentralselling#ValidationException" } + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" } } ] @@ -8677,6 +8704,9 @@ { "target": "com.amazonaws.partnercentralselling#AccessDeniedException" }, + { + "target": "com.amazonaws.partnercentralselling#ResourceNotFoundException" + }, { "target": "com.amazonaws.partnercentralselling#ThrottlingException" }, @@ -8691,7 +8721,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to listing resource snapshot jobs in AWS Partner Central" }, - "smithy.api#documentation": "

\n Lists resource snapshot jobs owned by the customer. This operation supports various\n filtering scenarios, including listing all jobs owned by the caller, jobs for a specific\n engagement, jobs with a specific status, or any combination of these filters.\n

", + "smithy.api#documentation": "

Lists resource snapshot jobs owned by the customer. This operation supports various\n filtering scenarios, including listing all jobs owned by the caller, jobs for a specific\n engagement, jobs with a specific status, or any combination of these filters.

", "smithy.api#http": { "method": "POST", "uri": "/ListResourceSnapshotJobs", @@ -8712,7 +8742,7 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog related to the request.\n

", + "smithy.api#documentation": "

Specifies the catalog related to the request.

", "smithy.api#required": {} } }, @@ -8720,7 +8750,7 @@ "target": "com.amazonaws.partnercentralselling#PageSize", "traits": { "smithy.api#default": 100, - "smithy.api#documentation": "

\n The maximum number of results to return in a single call. If omitted, defaults to\n 50.\n

", + "smithy.api#documentation": "

The maximum number of results to return in a single call. If omitted, defaults to 50.\n

", "smithy.api#range": { "min": 1, "max": 1000 @@ -8730,25 +8760,25 @@ "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The token for the next set of results.\n

" + "smithy.api#documentation": "

The token for the next set of results.

" } }, "EngagementIdentifier": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the engagement to filter the response.\n

" + "smithy.api#documentation": "

The identifier of the engagement to filter the response.

" } }, "Status": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobStatus", "traits": { - "smithy.api#documentation": "

\n The status of the jobs to filter the response.\n

" + "smithy.api#documentation": "

The status of the jobs to filter the response.

" } }, "Sort": { "target": "com.amazonaws.partnercentralselling#SortObject", "traits": { - "smithy.api#documentation": "

\n Configures the sorting of the response. If omitted, results are sorted by CreatedDate\n in descending order.\n

" + "smithy.api#documentation": "

Configures the sorting of the response. If omitted, results are sorted by\n CreatedDate in descending order.

" } } }, @@ -8762,14 +8792,14 @@ "ResourceSnapshotJobSummaries": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobSummaryList", "traits": { - "smithy.api#documentation": "

\n An array of resource snapshot job summary objects.\n

", + "smithy.api#documentation": "

An array of resource snapshot job summary objects.

", "smithy.api#required": {} } }, "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The token to retrieve the next set of results. If there are no additional results,\n this value is null. \n

" + "smithy.api#documentation": "

The token to retrieve the next set of results. If there are no additional results,\n this value is null.

" } } }, @@ -8806,7 +8836,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to listing resource snapshots in AWS Partner Central" }, - "smithy.api#documentation": "

\n Retrieves a list of resource view snapshots based on specified criteria. \n

", + "smithy.api#documentation": "

Retrieves a list of resource view snapshots based on specified criteria. This\n operation supports various use cases, including:

\n
    \n
  • \n

    Fetching all snapshots associated with an engagement.

    \n
  • \n
  • \n

    Retrieving snapshots of a specific resource type within an engagement.

    \n
  • \n
  • \n

    Obtaining snapshots for a particular resource using a specified\n template.

    \n
  • \n
  • \n

    Accessing the latest snapshot of a resource within an engagement.

    \n
  • \n
  • \n

    Filtering snapshots by resource owner.

    \n
  • \n
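The use cases listed above all reduce to choosing which optional filters to set on the request. A hypothetical sketch of the second one (snapshots of a specific resource type within an engagement), assuming Soto-generated names; the `.opportunity` enum case is an assumed spelling of the model's "Opportunity" resource type:

```swift
import SotoPartnerCentralSelling

// Hedged sketch: list opportunity snapshots within one engagement.
func opportunitySnapshots(
    _ selling: PartnerCentralSelling,
    engagementId: String
) async throws -> [ResourceSnapshotSummary] {
    let response = try await selling.listResourceSnapshots(
        .init(
            catalog: "AWS",
            engagementIdentifier: engagementId, // required by the model above
            resourceType: .opportunity          // optional filter
        )
    )
    return response.resourceSnapshotSummaries
}
```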
", "smithy.api#http": { "method": "POST", "uri": "/ListResourceSnapshots", @@ -8827,7 +8857,7 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog related to the request.\n

", + "smithy.api#documentation": "

Specifies the catalog related to the request.

", "smithy.api#required": {} } }, @@ -8835,7 +8865,7 @@ "target": "com.amazonaws.partnercentralselling#PageSize", "traits": { "smithy.api#default": 100, - "smithy.api#documentation": "

\n The maximum number of results to return in a single call.\n

", + "smithy.api#documentation": "

The maximum number of results to return in a single call.

", "smithy.api#range": { "min": 1, "max": 1000 @@ -8845,38 +8875,38 @@ "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The token for the next set of results.\n

" + "smithy.api#documentation": "

The token for the next set of results.

" } }, "EngagementIdentifier": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier of the engagement associated with the snapshots.\n

", + "smithy.api#documentation": "

The unique identifier of the engagement associated with the snapshots.

", "smithy.api#required": {} } }, "ResourceType": { "target": "com.amazonaws.partnercentralselling#ResourceType", "traits": { - "smithy.api#documentation": "

\n Filters the response to include only snapshots of the specified resource type.\n

" + "smithy.api#documentation": "

Filters the response to include only snapshots of the specified resource type.\n

" } }, "ResourceIdentifier": { "target": "com.amazonaws.partnercentralselling#ResourceIdentifier", "traits": { - "smithy.api#documentation": "

\n Filters the response to include only snapshots of the specified resource.\n

" + "smithy.api#documentation": "

Filters the response to include only snapshots of the specified resource.

" } }, "ResourceSnapshotTemplateIdentifier": { "target": "com.amazonaws.partnercentralselling#ResourceTemplateName", "traits": { - "smithy.api#documentation": "

\n Filters the response to include only snapshots created using the specified\n template.\n

" + "smithy.api#documentation": "

Filters the response to include only snapshots created using the specified\n template.

" } }, "CreatedBy": { "target": "com.amazonaws.partnercentralselling#AwsAccount", "traits": { - "smithy.api#documentation": "

\n Filters the response to include only snapshots of resources created by the specified AWS\n account.\n

" + "smithy.api#documentation": "

Filters the response to include only snapshots of resources owned by the specified\n AWS account.

" } } }, @@ -8890,14 +8920,14 @@ "ResourceSnapshotSummaries": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotSummaryList", "traits": { - "smithy.api#documentation": "

\n An array of resource snapshot summary objects.\n

", + "smithy.api#documentation": "

An array of resource snapshot summary objects.

", "smithy.api#required": {} } }, "NextToken": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n The token to retrieve the next set of results. If there are no additional results,\n this value is null. \n

" + "smithy.api#documentation": "

The token to retrieve the next set of results. If there are no additional results,\n this value is null.

" } } }, @@ -9030,26 +9060,91 @@ "smithy.api#output": {} } }, + "com.amazonaws.partnercentralselling#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.partnercentralselling#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.partnercentralselling#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.partnercentralselling#AccessDeniedException" + }, + { + "target": "com.amazonaws.partnercentralselling#InternalServerException" + }, + { + "target": "com.amazonaws.partnercentralselling#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.partnercentralselling#ThrottlingException" + }, + { + "target": "com.amazonaws.partnercentralselling#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of tags for a resource.

", + "smithy.api#http": { + "method": "POST", + "uri": "/ListTagsForResource", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.partnercentralselling#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.partnercentralselling#TaggableResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource for which you want to retrieve\n tags.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.partnercentralselling#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.partnercentralselling#TagList", + "traits": { + "smithy.api#documentation": "

A map of the key-value pairs for the tag or tags assigned to the specified resource.
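The three shapes above define the read side of the tagging API this diff introduces. A hedged sketch of how a Soto-generated client might expose it (the `listTagsForResource` method name and property spellings are assumptions derived from the model; the ARN is a placeholder):

```swift
import SotoPartnerCentralSelling

// Hypothetical sketch: print the tags on a resource snapshot job,
// the taggable resource named by the TagResource iamAction later in this diff.
func printJobTags(
    _ selling: PartnerCentralSelling,
    jobArn: String
) async throws {
    let response = try await selling.listTagsForResource(
        .init(resourceArn: jobArn)
    )
    for tag in response.tags {
        print("\(tag.key)=\(tag.value)")
    }
}
```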

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.partnercentralselling#ListTasksSortBase": { "type": "structure", "members": { "SortOrder": { "target": "com.amazonaws.partnercentralselling#SortOrder", "traits": { - "smithy.api#documentation": "

\n Determines the order in which the sorted results are presented.\n

", + "smithy.api#documentation": "

Determines the order in which the sorted results are presented.

", "smithy.api#required": {} } }, "SortBy": { "target": "com.amazonaws.partnercentralselling#ListTasksSortName", "traits": { - "smithy.api#documentation": "

\n Specifies the field by which the task list should be sorted.\n

", + "smithy.api#documentation": "

Specifies the field by which the task list should be sorted.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

\n Defines the sorting parameters for listing tasks. This structure allows for specifying\n the field to sort by and the order of sorting.\n

" + "smithy.api#documentation": "

Defines the sorting parameters for listing tasks. This structure allows for\n specifying the field to sort by and the order of sorting.

" } }, "com.amazonaws.partnercentralselling#ListTasksSortName": { @@ -9443,7 +9538,7 @@ "Arn": { "target": "com.amazonaws.partnercentralselling#OpportunityArn", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) for the opportunity. This globally unique identifier\n can be used for IAM policies and cross-service references. \n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the opportunity. This globally unique identifier\n can be used for IAM policies and cross-service references.

" } }, "PartnerOpportunityIdentifier": { @@ -9499,25 +9594,25 @@ "OpportunityType": { "target": "com.amazonaws.partnercentralselling#OpportunityType", "traits": { - "smithy.api#documentation": "

\n Specifies the opportunity type.\n

" + "smithy.api#documentation": "

Specifies the opportunity type.

" } }, "Lifecycle": { "target": "com.amazonaws.partnercentralselling#LifeCycleForView", "traits": { - "smithy.api#documentation": "

\n Contains information about the opportunity's lifecycle, including its current stage,\n status, and important dates such as creation and last modification times.\n

" + "smithy.api#documentation": "

Contains information about the opportunity's lifecycle, including its current stage,\n status, and important dates such as creation and last modification times.

" } }, "OpportunityTeam": { "target": "com.amazonaws.partnercentralselling#PartnerOpportunityTeamMembersList", "traits": { - "smithy.api#documentation": "

\n Represents the internal team handling the opportunity. Specify the members involved in\n collaborating on an opportunity within the partner's organization. \n

" + "smithy.api#documentation": "

Represents the internal team handling the opportunity. Specify the members involved\n in collaborating on an opportunity within the partner's organization.

" } }, "PrimaryNeedsFromAws": { "target": "com.amazonaws.partnercentralselling#PrimaryNeedsFromAws", "traits": { - "smithy.api#documentation": "

\n Identifies the type of support the partner needs from AWS.\n

" + "smithy.api#documentation": "

Identifies the type of support the partner needs from AWS.

" } }, "Customer": { @@ -9526,7 +9621,7 @@ "Project": { "target": "com.amazonaws.partnercentralselling#ProjectView", "traits": { - "smithy.api#documentation": "

\n Contains summary information about the project associated with the opportunity,\n including project name, description, timeline, and other relevant details.\n

" + "smithy.api#documentation": "

Contains summary information about the project associated with the opportunity,\n including project name, description, timeline, and other relevant details.

" } }, "RelatedEntityIdentifiers": { @@ -9534,7 +9629,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Provides a comprehensive view of an opportunity summary, including lifecycle\n information, team details, opportunity type, primary needs from AWS, and associated\n project information.\n

" + "smithy.api#documentation": "

Provides a comprehensive view of an opportunity summary, including lifecycle\n information, team details, opportunity type, primary needs from AWS, and associated\n project information.

" } }, "com.amazonaws.partnercentralselling#OpportunityType": { @@ -9888,31 +9983,31 @@ "DeliveryModels": { "target": "com.amazonaws.partnercentralselling#DeliveryModels", "traits": { - "smithy.api#documentation": "

\n Describes the deployment or consumption model for the partner solution or offering.\n This field indicates how the project's solution will be delivered or implemented for the\n customer.\n

" + "smithy.api#documentation": "

Describes the deployment or consumption model for the partner solution or offering.\n This field indicates how the project's solution will be delivered or implemented for the\n customer.

" } }, "ExpectedCustomerSpend": { "target": "com.amazonaws.partnercentralselling#ExpectedCustomerSpendList", "traits": { - "smithy.api#documentation": "

\n Provides information about the anticipated customer spend related to this project.\n This may include details such as amount, frequency, and currency of expected\n expenditure.\n

" + "smithy.api#documentation": "

Provides information about the anticipated customer spend related to this project.\n This may include details such as amount, frequency, and currency of expected\n expenditure.

" } }, "CustomerUseCase": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

\n Specifies the proposed solution focus or type of workload for the project.\n

" + "smithy.api#documentation": "

Specifies the proposed solution focus or type of workload for the project.

" } }, "SalesActivities": { "target": "com.amazonaws.partnercentralselling#SalesActivities", "traits": { - "smithy.api#documentation": "

\n Lists the pre-sales activities that have occurred with the end-customer related to the\n opportunity. This field is conditionally mandatory when the project is qualified for\n Co-Sell and helps drive assignment priority on the AWS side. It provides insight into\n the engagement level with the customer. \n

" + "smithy.api#documentation": "

Lists the pre-sales activities that have occurred with the end-customer related to\n the opportunity. This field is conditionally mandatory when the project is qualified for\n Co-Sell and helps drive assignment priority on the AWS side. It provides insight into\n the engagement level with the customer.

" } }, "OtherSolutionDescription": { "target": "com.amazonaws.partnercentralselling#PiiString", "traits": { - "smithy.api#documentation": "

\n Offers a description of other solutions if the standard solutions do not adequately\n cover the project's scope.\n

", + "smithy.api#documentation": "

Offers a description of other solutions if the standard solutions do not adequately\n cover the project's scope.

", "smithy.api#length": { "max": 255 } @@ -9920,7 +10015,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Provides the project view of an opportunity resource shared through a snapshot.\n

" + "smithy.api#documentation": "

Provides the project view of an opportunity resource shared through a snapshot.\n

" } }, "com.amazonaws.partnercentralselling#PutSellingSystemSettings": { @@ -9949,7 +10044,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to put system settings settings in AWS Partner Central" }, - "smithy.api#documentation": "

Updates the currently set system settings, which include the IAM Role used for resource snapshot jobs.

", + "smithy.api#documentation": "

Updates the currently set system settings, which include the IAM Role used for\n resource snapshot jobs.

", "smithy.api#http": { "method": "POST", "uri": "/PutSellingSystemSettings", @@ -9964,7 +10059,7 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

Specifies the catalog in which the settings will be updated. Acceptable values include\n AWS for production and Sandbox for testing\n environments.

", + "smithy.api#documentation": "

Specifies the catalog in which the settings will be updated. Acceptable values include\n AWS for production and Sandbox for testing\n environments.

", "smithy.api#required": {} } }, @@ -9985,7 +10080,7 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

Specifies the catalog in which the settings are defined. Acceptable values include\n AWS for production and Sandbox for testing\n environments.

", + "smithy.api#documentation": "

Specifies the catalog in which the settings are defined. Acceptable values include\n AWS for production and Sandbox for testing\n environments.

", "smithy.api#required": {} } }, @@ -10069,6 +10164,12 @@ "smithy.api#enumValue": "EngagementInvitationConflict" } }, + "INTERNAL_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InternalError" + } + }, "OPPORTUNITY_VALIDATION_FAILED": { "target": "smithy.api#Unit", "traits": { @@ -10099,12 +10200,6 @@ "smithy.api#enumValue": "ResourceSnapshotConflict" } }, - "INTERNAL_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "InternalError" - } - }, "SERVICE_QUOTA_EXCEEDED": { "target": "smithy.api#Unit", "traits": { @@ -10210,6 +10305,9 @@ { "target": "com.amazonaws.partnercentralselling#AccessDeniedException" }, + { + "target": "com.amazonaws.partnercentralselling#ConflictException" + }, { "target": "com.amazonaws.partnercentralselling#InternalServerException" }, @@ -10471,30 +10569,30 @@ "Id": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier for the resource snapshot job within the AWS Partner Central\n system. This ID is used for direct references to the job within the service.\n

" + "smithy.api#documentation": "

The unique identifier for the resource snapshot job within the AWS Partner Central\n system. This ID is used for direct references to the job within the service.

" } }, "Arn": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobArn", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) for the resource snapshot job.\n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the resource snapshot job.

" } }, "EngagementId": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The unique identifier for the engagement within the AWS Partner Central\n system. This ID is used for direct references to the engagement within the service.\n

" + "smithy.api#documentation": "

The unique identifier of the Engagement.

" } }, "Status": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobStatus", "traits": { - "smithy.api#documentation": "

\n Represents the current status of the resource snapshot job.\n

" + "smithy.api#documentation": "

The current status of the snapshot job.

\n

Valid values:

\n
    \n
  • \n

    STOPPED: The job is not currently running.

    \n
  • \n
  • \n

    RUNNING: The job is actively executing.

    \n
  • \n
" } } }, "traits": { - "smithy.api#documentation": "

\n An object that contains a Resource Snapshot Job's subset of fields.\n

" + "smithy.api#documentation": "

An object that contains a Resource Snapshot Job's subset of fields.\n

" } }, "com.amazonaws.partnercentralselling#ResourceSnapshotJobSummaryList": { @@ -10509,12 +10607,12 @@ "OpportunitySummary": { "target": "com.amazonaws.partnercentralselling#OpportunitySummaryView", "traits": { - "smithy.api#documentation": "

\n An object that contains an opportunity's subset of fields. \n

" + "smithy.api#documentation": "

An object that contains an opportunity's subset of fields.

" } } }, "traits": { - "smithy.api#documentation": "

\n Represents the payload of a resource snapshot. This structure is designed to\n accommodate different types of resource snapshots, currently supporting opportunity\n summaries.\n

" + "smithy.api#documentation": "

Represents the payload of a resource snapshot. This structure is designed to\n accommodate different types of resource snapshots, currently supporting opportunity\n summaries.

" } }, "com.amazonaws.partnercentralselling#ResourceSnapshotRevision": { @@ -10531,13 +10629,13 @@ "Arn": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotArn", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the snapshot. This globally unique identifier can be\n used for cross-service references and in IAM policies.\n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the snapshot. This globally unique identifier can\n be used for cross-service references and in IAM policies.

" } }, "Revision": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotRevision", "traits": { - "smithy.api#documentation": "

\n The revision number of the snapshot. This integer value is incremented each time the\n snapshot is updated, allowing for version tracking of the resource snapshot.\n

" + "smithy.api#documentation": "

The revision number of the snapshot. This integer value is incremented each time the\n snapshot is updated, allowing for version tracking of the resource snapshot.

" } }, "ResourceType": { @@ -10549,7 +10647,7 @@ "ResourceId": { "target": "com.amazonaws.partnercentralselling#ResourceIdentifier", "traits": { - "smithy.api#documentation": "

The identifier of the specific resource snapshotted. The format might vary depending\n on the ResourceType.

" + "smithy.api#documentation": "

The identifier of the specific resource snapshotted. The format might vary depending\n on the ResourceType.

" } }, "ResourceSnapshotTemplateName": { @@ -10561,12 +10659,12 @@ "CreatedBy": { "target": "com.amazonaws.partnercentralselling#AwsAccount", "traits": { - "smithy.api#documentation": "

The AWS account ID of the principal (user or role) who created the snapshot. This\n helps in tracking the origin of the snapshot.

" + "smithy.api#documentation": "

The AWS account ID of the entity that owns the resource from which the snapshot was\n created.

" } } }, "traits": { - "smithy.api#documentation": "

\n Provides a concise summary of a resource snapshot, including its unique identifier and\n version information. This structure is used to quickly reference and identify specific\n versions of resource snapshots. \n

" + "smithy.api#documentation": "

Provides a concise summary of a resource snapshot, including its unique identifier\n and version information. This structure is used to quickly reference and identify\n specific versions of resource snapshots.

" } }, "com.amazonaws.partnercentralselling#ResourceSnapshotSummaryList": { @@ -10913,7 +11011,7 @@ "Arn": { "target": "com.amazonaws.partnercentralselling#SolutionArn", "traits": { - "smithy.api#documentation": "

\n The SolutionBase structure provides essential information about a solution.\n

" + "smithy.api#documentation": "

The SolutionBase structure provides essential information about a solution.

" } }, "Name": { @@ -11064,18 +11162,18 @@ "SortBy": { "target": "com.amazonaws.partnercentralselling#SortBy", "traits": { - "smithy.api#documentation": "

\n Specifies the field by which to sort the resource snapshot jobs.\n

" + "smithy.api#documentation": "

Specifies the field by which to sort the resource snapshot jobs.

" } }, "SortOrder": { "target": "com.amazonaws.partnercentralselling#SortOrder", "traits": { - "smithy.api#documentation": "

\n Determines the order in which the sorted results are presented.\n

" + "smithy.api#documentation": "

Determines the order in which the sorted results are presented.

" } } }, "traits": { - "smithy.api#documentation": "

\n Defines the sorting parameters for listing resource snapshot jobs. This structure\n allows you to specify the field to sort by and the order of sorting. \n

" + "smithy.api#documentation": "

Defines the sorting parameters for listing resource snapshot jobs. This structure\n allows you to specify the field to sort by and the order of sorting.

" } }, "com.amazonaws.partnercentralselling#SortOrder": { @@ -11223,6 +11321,12 @@ "smithy.api#documentation": "

Specifies the unique identifier of the EngagementInvitation to be\n accepted. Providing the correct identifier helps ensure that the correct engagement is\n processed.

", "smithy.api#required": {} } + }, + "Tags": { + "target": "com.amazonaws.partnercentralselling#TagList", + "traits": { + "smithy.api#documentation": "A list of objects specifying each tag name and value." + } } }, "traits": { @@ -11277,7 +11381,7 @@ "ResourceSnapshotJobId": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the resource snapshot job created as part of this task.\n

" + "smithy.api#documentation": "

The identifier of the Resource Snapshot Job created as part of this task.

" } }, "EngagementInvitationId": { @@ -11378,6 +11482,12 @@ "traits": { "smithy.api#required": {} } + }, + "Tags": { + "target": "com.amazonaws.partnercentralselling#TagList", + "traits": { + "smithy.api#documentation": "A list of objects specifying each tag name and value." + } } }, "traits": { @@ -11432,19 +11542,19 @@ "ResourceSnapshotJobId": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the resource snapshot job created to add the opportunity resource\n snapshot to the Engagement. Only populated if TaskStatus is COMPLETE.\n

" + "smithy.api#documentation": "

The identifier of the resource snapshot job created to add the opportunity resource\n snapshot to the Engagement. Only populated if TaskStatus is COMPLETE

" } }, "EngagementId": { "target": "com.amazonaws.partnercentralselling#EngagementIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the newly created engagement. Only populated if TaskStatus is\n COMPLETE.\n

" + "smithy.api#documentation": "

The identifier of the newly created Engagement. Only populated if TaskStatus is\n COMPLETE.

" } }, "EngagementInvitationId": { "target": "com.amazonaws.partnercentralselling#EngagementInvitationIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the new engagement invitation. Only populated if TaskStatus is\n COMPLETE.\n

" + "smithy.api#documentation": "

The identifier of the new Engagement invitation. Only populated if TaskStatus is\n COMPLETE.

" } } }, @@ -11478,7 +11588,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to starting resource snapshot jobs in AWS Partner Central" }, - "smithy.api#documentation": "

\n Starts a resource snapshot job that has been previously created.\n

", + "smithy.api#documentation": "

Starts a resource snapshot job that has been previously created.

", "smithy.api#http": { "method": "POST", "uri": "/StartResourceSnapshotJob", @@ -11493,14 +11603,14 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog related to the request.\n

", + "smithy.api#documentation": "

Specifies the catalog related to the request. Valid values are:

\n
    \n
  • \n

    AWS: Starts the request from the production AWS environment.

    \n
  • \n
  • \n

    Sandbox: Starts the request from a sandbox environment used for testing or\n development purposes.

    \n
  • \n
", "smithy.api#required": {} } }, "ResourceSnapshotJobIdentifier": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the resource snapshot job to start.\n

", + "smithy.api#documentation": "

The identifier of the resource snapshot job to start.

", "smithy.api#required": {}, "smithy.api#resourceIdentifier": "Identifier" } @@ -11536,7 +11646,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to stopping resource snapshot jobs in AWS Partner Central" }, - "smithy.api#documentation": "

\n Stops a resource snapshot job. The job must be started prior to being stopped.\n

", + "smithy.api#documentation": "

Stops a resource snapshot job. The job must be started prior to being stopped.

", "smithy.api#http": { "method": "POST", "uri": "/StopResourceSnapshotJob", @@ -11551,14 +11661,14 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog related to the request. \n

", + "smithy.api#documentation": "

Specifies the catalog related to the request. Valid values are:

\n
    \n
  • \n

    AWS: Stops the request from the production AWS environment.

    \n
  • \n
  • \n

    Sandbox: Stops the request from a sandbox environment used for testing or\n development purposes.

    \n
  • \n
", "smithy.api#required": {} } }, "ResourceSnapshotJobIdentifier": { "target": "com.amazonaws.partnercentralselling#ResourceSnapshotJobIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the job to stop.\n

", + "smithy.api#documentation": "

The identifier of the job to stop.
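The start and stop operations above are symmetric: each takes only a Catalog plus the job identifier. A hypothetical Soto sketch with placeholder identifiers (both calls assume the generated method and initializer names follow the shapes in this model):

```swift
import SotoPartnerCentralSelling

// Hedged sketch: start, then later stop, a previously created snapshot job.
// Catalog accepts "AWS" or "Sandbox" per the valid values documented above.
func cycleSnapshotJob(
    _ selling: PartnerCentralSelling,
    jobId: String
) async throws {
    _ = try await selling.startResourceSnapshotJob(
        .init(catalog: "Sandbox", resourceSnapshotJobIdentifier: jobId)
    )
    // ... the job now runs until explicitly stopped ...
    _ = try await selling.stopResourceSnapshotJob(
        .init(catalog: "Sandbox", resourceSnapshotJobIdentifier: jobId)
    )
}
```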

", "smithy.api#required": {}, "smithy.api#resourceIdentifier": "Identifier" } @@ -11606,7 +11716,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to submit Opportunities on AWS Partner Central" }, - "smithy.api#documentation": "

\n Use this action to submit an opportunity that was previously created by partner for\n AWS review. After you perform this action, the opportunity becomes non-editable until it\n is reviewed by AWS and has LifeCycle.ReviewStatus as either\n Approved or Action Required. \n

", + "smithy.api#documentation": "

Use this action to submit an Opportunity that was previously created by partner for\n AWS review. After you perform this action, the Opportunity becomes non-editable until it\n is reviewed by AWS and has LifeCycle.ReviewStatus as either\n Approved or Action Required.

", "smithy.api#http": { "method": "POST", "uri": "/SubmitOpportunity", @@ -11620,14 +11730,14 @@ "Catalog": { "target": "com.amazonaws.partnercentralselling#CatalogIdentifier", "traits": { - "smithy.api#documentation": "

\n Specifies the catalog related to the request. \n

", + "smithy.api#documentation": "

Specifies the catalog related to the request. Valid values are:

\n
    \n
  • \n

    AWS: Submits the opportunity request from the production AWS\n environment.

    \n
  • \n
  • \n

    Sandbox: Submits the opportunity request from a sandbox environment used for\n testing or development purposes.

    \n
  • \n
", "smithy.api#required": {} } }, "Identifier": { "target": "com.amazonaws.partnercentralselling#OpportunityIdentifier", "traits": { - "smithy.api#documentation": "

\n The identifier of the opportunity previously created by partner and needs to be\n submitted.\n

", + "smithy.api#documentation": "

The identifier of the Opportunity previously created by partner and needs to be\n submitted.

", "smithy.api#required": {}, "smithy.api#resourceIdentifier": "Identifier" } @@ -11635,14 +11745,133 @@ "InvolvementType": { "target": "com.amazonaws.partnercentralselling#SalesInvolvementType", "traits": { - "smithy.api#documentation": "

\n Specifies the level of AWS sellers' involvement on the opportunity. \n

", + "smithy.api#documentation": "

Specifies the level of AWS sellers' involvement on the opportunity. Valid\n values:

\n
    \n
  • \n

    \n Co-sell: Indicates the user wants to co-sell with AWS. Share the\n opportunity with AWS to receive deal assistance and support.

    \n
  • \n
  • \n

    \n For Visibility Only: Indicates that the user does not need\n support from AWS Sales Rep. Share this opportunity with AWS for visibility only,\n you will not receive deal assistance and support.

    \n
  • \n
", "smithy.api#required": {} } }, "Visibility": { "target": "com.amazonaws.partnercentralselling#Visibility", "traits": { - "smithy.api#documentation": "

\n Determines whether to restrict visibility of the opportunity from AWS sales. Default\n value is Full. \n

" + "smithy.api#documentation": "

Determines whether to restrict visibility of the opportunity from AWS sales. Default\n value is Full. Valid values:

\n
    \n
  • \n

    \n Full: The opportunity is fully visible to AWS sales.

    \n
  • \n
  • \n

    \n Limited: The opportunity has restricted visibility to AWS\n sales.

    \n
  • \n
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.partnercentralselling#Tag": { + "type": "structure", + "members": { + "Key": { + "target": "com.amazonaws.partnercentralselling#TagKey", + "traits": { + "smithy.api#documentation": "

The key in the tag.

", + "smithy.api#required": {} + } + }, + "Value": { + "target": "com.amazonaws.partnercentralselling#TagValue", + "traits": { + "smithy.api#documentation": "

The value in the tag.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The key-value pair assigned to a specified resource.

" + } + }, + "com.amazonaws.partnercentralselling#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + } + }, + "com.amazonaws.partnercentralselling#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.partnercentralselling#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.partnercentralselling#TagList": { + "type": "list", + "member": { + "target": "com.amazonaws.partnercentralselling#Tag" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + } + } + }, + "com.amazonaws.partnercentralselling#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.partnercentralselling#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.partnercentralselling#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.partnercentralselling#AccessDeniedException" + }, + { + "target": "com.amazonaws.partnercentralselling#ConflictException" + }, + { + "target": "com.amazonaws.partnercentralselling#InternalServerException" + }, + { + "target": "com.amazonaws.partnercentralselling#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.partnercentralselling#ThrottlingException" + }, + { + "target": "com.amazonaws.partnercentralselling#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Allows users to add new tags to a resource. Supported resource: ResourceSnapshotJob" + }, + "smithy.api#documentation": "

Assigns one or more tags (key-value pairs) to the specified resource.

", + "smithy.api#http": { + "method": "POST", + "uri": "/TagResource", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.partnercentralselling#TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.partnercentralselling#TaggableResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource that you want to tag.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.partnercentralselling#TagList", + "traits": { + "smithy.api#documentation": "

A map of the key-value pairs of the tag or tags to assign to the resource.

", + "smithy.api#required": {} } } }, @@ -11650,6 +11879,32 @@ "smithy.api#input": {} } }, + "com.amazonaws.partnercentralselling#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.partnercentralselling#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 256 + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + } + }, + "com.amazonaws.partnercentralselling#TaggableResourceArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 + }, + "smithy.api#pattern": "^arn:[\\w+=/,.@-]+:partnercentral:[\\w+=/,.@-]*:[0-9]{12}:catalog/([a-zA-Z]+)/[\\w+=,.@-]+(/[\\w+=,.@-]+)*$" + } + }, "com.amazonaws.partnercentralselling#TaskArn": { "type": "string", "traits": { @@ -11728,6 +11983,73 @@ "smithy.api#httpError": 429 } }, + "com.amazonaws.partnercentralselling#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.partnercentralselling#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.partnercentralselling#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.partnercentralselling#AccessDeniedException" + }, + { + "target": "com.amazonaws.partnercentralselling#ConflictException" + }, + { + "target": "com.amazonaws.partnercentralselling#InternalServerException" + }, + { + "target": "com.amazonaws.partnercentralselling#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.partnercentralselling#ThrottlingException" + }, + { + "target": "com.amazonaws.partnercentralselling#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes a tag or tags from a resource.

", + "smithy.api#http": { + "method": "POST", + "uri": "/UntagResource", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.partnercentralselling#UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.partnercentralselling#TaggableResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource that you want to untag.

", + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.partnercentralselling#TagKeyList", + "traits": { + "smithy.api#documentation": "

The keys of the key-value pairs for the tag or tags you want to remove from the\n specified resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.partnercentralselling#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.partnercentralselling#UpdateOpportunity": { "type": "operation", "input": { @@ -11784,7 +12106,7 @@ "PrimaryNeedsFromAws": { "target": "com.amazonaws.partnercentralselling#PrimaryNeedsFromAws", "traits": { - "smithy.api#documentation": "

Identifies the type of support the partner needs from Amazon Web Services.

\n

Valid values:

\n
    \n
  • \n

    Cosell—Architectural Validation: Confirmation from Amazon Web Services that the\n partner's proposed solution architecture is aligned with Amazon Web Services best\n practices and poses minimal architectural risks.

    \n
  • \n
  • \n

    Cosell—Business Presentation: Request Amazon Web Services seller's\n participation in a joint customer presentation.

    \n
  • \n
  • \n

    Cosell—Competitive Information: Access to Amazon Web Services competitive\n resources and support for the partner's proposed solution.

    \n
  • \n
  • \n

    Cosell—Pricing Assistance: Connect with an AWS seller for support situations\n where a partner may be receiving an upfront discount on a service (for example:\n EDP deals).

    \n
  • \n
  • \n

    Cosell—Technical Consultation: Connection with an Amazon Web Services Solutions\n Architect to address the partner's questions about the proposed solution.

    \n
  • \n
  • \n

    Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different\n cost savings of proposed solutions on Amazon Web Services versus on-premises or a\n traditional hosting environment.

    \n
  • \n
  • \n

    Cosell—Deal Support: Request Amazon Web Services seller's support to progress\n the opportunity (for example: joint customer call, strategic\n positioning).

    \n
  • \n
  • \n

    Cosell—Support for Public Tender/RFx: Opportunity related to the public sector\n where the partner needs RFx support from Amazon Web Services.

    \n
  • \n
  • \n

    Do Not Need Support from AWS Sales Rep: Indicates that a partner doesn't need\n support from an Amazon Web Services Sales representative. The opportunity is\n managed solely by the partner. It's possible to request coselling support on\n these opportunities at any stage during their lifecycle. Also known as,\n for-visibility-only (FVO) opportunity.

    \n
  • \n
" + "smithy.api#documentation": "

Identifies the type of support the partner needs from Amazon Web Services.

\n

Valid values:

\n
    \n
  • \n

    Cosell—Architectural Validation: Confirmation from Amazon Web Services that the\n partner's proposed solution architecture is aligned with Amazon Web Services best\n practices and poses minimal architectural risks.

    \n
  • \n
  • \n

    Cosell—Business Presentation: Request Amazon Web Services seller's\n participation in a joint customer presentation.

    \n
  • \n
  • \n

    Cosell—Competitive Information: Access to Amazon Web Services competitive\n resources and support for the partner's proposed solution.

    \n
  • \n
  • \n

    Cosell—Pricing Assistance: Connect with an AWS seller for support situations\n where a partner may be receiving an upfront discount on a service (for example:\n EDP deals).

    \n
  • \n
  • \n

    Cosell—Technical Consultation: Connection with an Amazon Web Services Solutions\n Architect to address the partner's questions about the proposed solution.

    \n
  • \n
  • \n

    Cosell—Total Cost of Ownership Evaluation: Assistance with quoting different\n cost savings of proposed solutions on Amazon Web Services versus on-premises or a\n traditional hosting environment.

    \n
  • \n
  • \n

    Cosell—Deal Support: Request Amazon Web Services seller's support to progress\n the opportunity (for example: joint customer call, strategic\n positioning).

    \n
  • \n
  • \n

    Cosell—Support for Public Tender/RFx: Opportunity related to the public sector\n where the partner needs RFx support from Amazon Web Services.

    \n
  • \n
" } }, "NationalSecurity": { diff --git a/models/qconnect.json b/models/qconnect.json index 5341c7344e..193ad79626 100644 --- a/models/qconnect.json +++ b/models/qconnect.json @@ -1299,6 +1299,12 @@ "traits": { "smithy.api#documentation": "

The association configurations for overriding behavior on this AI Agent.

" } + }, + "locale": { + "target": "com.amazonaws.qconnect#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The locale to which specifies the language and region settings that determine the response\n language for QueryAssistant.

\n \n

Changing this locale to anything other than en_US will turn off\n recommendations triggered by contact transcripts for agent assistance, as this feature is\n not supported in multiple languages.

\n
" + } } }, "traits": { @@ -7805,7 +7811,7 @@ "type": { "target": "com.amazonaws.qconnect#GuardrailPiiEntityType", "traits": { - "smithy.api#documentation": "

Configure AI Guardrail type when the PII entity is detected.

\n

The following PIIs are used to block or mask sensitive information:

\n
    \n
  • \n

    \n General\n

    \n
      \n
    • \n

      \n ADDRESS\n

      \n

      A physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12,\n Building 123\". An address can include information such as the street, building,\n location, city, state, country, county, zip code, precinct, and neighborhood.

      \n
    • \n
    • \n

      \n AGE\n

      \n

      An individual's age, including the quantity and unit of time. For example, in the\n phrase \"I am 40 years old,\" Guarrails recognizes \"40 years\" as an age.

      \n
    • \n
    • \n

      \n NAME\n

      \n

      An individual's name. This entity type does not include titles, such as Dr., Mr.,\n Mrs., or Miss. AI Guardrail doesn't apply this entity type to names that are part of\n organizations or addresses. For example, AI Guardrail recognizes the \"John Doe\n Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.\n

      \n
    • \n
    • \n

      \n EMAIL\n

      \n

      An email address, such as marymajor@email.com.

      \n
    • \n
    • \n

      \n PHONE\n

      \n

      A phone number. This entity type also includes fax and pager numbers.

      \n
    • \n
    • \n

      \n USERNAME\n

      \n

      A user name that identifies an account, such as a login name, screen name, nick\n name, or handle.

      \n
    • \n
    • \n

      \n PASSWORD\n

      \n

      An alphanumeric string that is used as a password, such as \"*\n very20special#pass*\".

      \n
    • \n
    • \n

      \n DRIVER_ID\n

      \n

      The number assigned to a driver's license, which is an official document\n permitting an individual to operate one or more motorized vehicles on a public road. A\n driver's license number consists of alphanumeric characters.

      \n
    • \n
    • \n

      \n LICENSE_PLATE\n

      \n

      A license plate for a vehicle is issued by the state or country where the vehicle\n is registered. The format for passenger vehicles is typically five to eight digits,\n consisting of upper-case letters and numbers. The format varies depending on the\n location of the issuing state or country.

      \n
    • \n
    • \n

      \n VEHICLE_IDENTIFICATION_NUMBER\n

      \n

      A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content\n and format are defined in the ISO 3779 specification. Each\n country has specific codes and formats for VINs.

      \n
    • \n
    \n
  • \n
  • \n

    \n Finance\n

    \n
      \n
    • \n

      \n REDIT_DEBIT_CARD_CVV\n

      \n

      A three-digit card verification code (CVV) that is present on VISA, MasterCard,\n and Discover credit and debit cards. For American Express credit or debit cards, the\n CVV is a four-digit numeric code.

      \n
    • \n
    • \n

      \n CREDIT_DEBIT_CARD_EXPIRY\n

      \n

      The expiration date for a credit or debit card. This number is usually four digits\n long and is often formatted as month/year or\n MM/YY. AI Guardrail recognizes expiration dates such as\n 01/21, 01/2021, and Jan\n 2021.

      \n
    • \n
    • \n

      \n CREDIT_DEBIT_CARD_NUMBER\n

      \n

      The number for a credit or debit card. These numbers can vary from 13 to 16 digits\n in length. However, Amazon Comprehend also recognizes credit or debit card numbers\n when only the last four digits are present.

      \n
    • \n
    • \n

      \n PIN\n

      \n

      A four-digit personal identification number (PIN) with which you can access your\n bank account.

      \n
    • \n
    • \n

      \n INTERNATIONAL_BANK_ACCOUNT_NUMBER\n

      \n

      An International Bank Account Number has specific formats in each country. For\n more information, see \n www.iban.com/structure.

      \n
    • \n
    • \n

      \n SWIFT_CODE\n

      \n

      A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a\n particular bank or branch. Banks use these codes for money transfers such as\n international wire transfers.

      \n

      SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to\n specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer\n to the head or primary office.

      \n
    • \n
    \n
  • \n
  • \n

    \n IT\n

    \n
      \n
    • \n

      \n IP_ADDRESS\n

      \n

      An IPv4 address, such as 198.51.100.0.

      \n
    • \n
    • \n

      \n MAC_ADDRESS\n

      \n

      A media access control (MAC) address is a unique identifier\n assigned to a network interface controller (NIC).

      \n
    • \n
    • \n

      \n URL\n

      \n

      A web address, such as www.example.com.

      \n
    • \n
    • \n

      \n AWS_ACCESS_KEY\n

      \n

      A unique identifier that's associated with a secret access key; you use the access\n key ID and secret access key to sign programmatic Amazon Web Services requests\n cryptographically.

      \n
    • \n
    • \n

      \n AWS_SECRET_KEY\n

      \n

      A unique identifier that's associated with an access key. You use the access key\n ID and secret access key to sign programmatic Amazon Web Services requests\n cryptographically.

      \n
    • \n
    \n
  • \n
  • \n

    \n USA specific\n

    \n
      \n
    • \n

      \n US_BANK_ACCOUNT_NUMBER\n

      \n

      A US bank account number, which is typically 10 to 12 digits long.

      \n
    • \n
    • \n

      \n US_BANK_ROUTING_NUMBER\n

      \n

      A US bank account routing number. These are typically nine digits long,

      \n
    • \n
    • \n

      \n US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER\n

      \n

      A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that\n starts with a \"9\" and contain a \"7\" or \"8\" as the fourth digit. An ITIN can be\n formatted with a space or a dash after the third and forth digits.

      \n
    • \n
    • \n

      \n US_PASSPORT_NUMBER\n

      \n

      A US passport number. Passport numbers range from six to nine alphanumeric\n characters.

      \n
    • \n
    • \n

      \n US_SOCIAL_SECURITY_NUMBER\n

      \n

      A US Social Security Number (SSN) is a nine-digit number that is issued to US\n citizens, permanent residents, and temporary working residents.

      \n
    • \n
    \n
  • \n
  • \n

    \n Canada specific\n

    \n
      \n
    • \n

      \n CA_HEALTH_NUMBER\n

      \n

      A Canadian Health Service Number is a 10-digit unique identifier, required for\n individuals to access healthcare benefits.

      \n
    • \n
    • \n

      \n CA_SOCIAL_INSURANCE_NUMBER\n

      \n

      A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier,\n required for individuals to access government programs and benefits.

      \n

      The SIN is formatted as three groups of three digits, such as \n 123-456-789. A SIN can be validated through a simple check-digit process\n called the Luhn\n algorithm .

      \n
    • \n
    \n
  • \n
  • \n

    \n UK Specific\n

    \n
      \n
    • \n

      \n UK_NATIONAL_HEALTH_SERVICE_NUMBER\n

      \n

      A UK National Health Service Number is a 10-17 digit number, such as 485\n 555 3456. The current system formats the 10-digit number with spaces\n after the third and sixth digits. The final digit is an error-detecting\n checksum.

      \n
    • \n
    • \n

      \n UK_NATIONAL_INSURANCE_NUMBER\n

      \n

      A UK National Insurance Number (NINO) provides individuals with access to National\n Insurance (social security) benefits. It is also used for some purposes in the UK tax\n system.

      \n

      The number is nine digits long and starts with two letters, followed by six\n numbers and one letter. A NINO can be formatted with a space or a dash after the two\n letters and after the second, forth, and sixth digits.

      \n
    • \n
    • \n

      \n UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER\n

      \n

      A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a\n taxpayer or a business.

      \n
    • \n
    \n
  • \n
  • \n

    \n Custom\n

    \n
      \n
    • \n

      \n Regex filter - You can use a regular expressions to\n define patterns for an AI Guardrail to recognize and act upon such as serial number,\n booking ID etc..

      \n
    • \n
    \n
  • \n
", + "smithy.api#documentation": "

Configure AI Guardrail type when the PII entity is detected.

\n

The following PIIs are used to block or mask sensitive information:

\n
    \n
  • \n

    \n General\n

    \n
      \n
    • \n

      \n ADDRESS\n

      \n

      A physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12,\n Building 123\". An address can include information such as the street, building,\n location, city, state, country, county, zip code, precinct, and neighborhood.

      \n
    • \n
    • \n

      \n AGE\n

      \n

      An individual's age, including the quantity and unit of time. For example, in the\n phrase \"I am 40 years old,\" Guarrails recognizes \"40 years\" as an age.

      \n
    • \n
    • \n

      \n NAME\n

      \n

      An individual's name. This entity type does not include titles, such as Dr., Mr.,\n Mrs., or Miss. AI Guardrail doesn't apply this entity type to names that are part of\n organizations or addresses. For example, AI Guardrail recognizes the \"John Doe\n Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.\n

      \n
    • \n
    • \n

      \n EMAIL\n

      \n

      An email address, such as marymajor@email.com.

      \n
    • \n
    • \n

      \n PHONE\n

      \n

      A phone number. This entity type also includes fax and pager numbers.

      \n
    • \n
    • \n

      \n USERNAME\n

      \n

      A user name that identifies an account, such as a login name, screen name, nick\n name, or handle.

      \n
    • \n
    • \n

      \n PASSWORD\n

      \n

      An alphanumeric string that is used as a password, such as \"*\n very20special#pass*\".

      \n
    • \n
    • \n

      \n DRIVER_ID\n

      \n

      The number assigned to a driver's license, which is an official document\n permitting an individual to operate one or more motorized vehicles on a public road. A\n driver's license number consists of alphanumeric characters.

      \n
    • \n
    • \n

      \n LICENSE_PLATE\n

      \n

      A license plate for a vehicle is issued by the state or country where the vehicle\n is registered. The format for passenger vehicles is typically five to eight digits,\n consisting of upper-case letters and numbers. The format varies depending on the\n location of the issuing state or country.

      \n
    • \n
    • \n

      \n VEHICLE_IDENTIFICATION_NUMBER\n

      \n

      A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content\n and format are defined in the ISO 3779 specification. Each\n country has specific codes and formats for VINs.

      \n
    • \n
    \n
  • \n
  • \n

    \n Finance\n

    \n
      \n
    • \n

      \n CREDIT_DEBIT_CARD_CVV\n

      \n

      A three-digit card verification code (CVV) that is present on VISA, MasterCard,\n and Discover credit and debit cards. For American Express credit or debit cards, the\n CVV is a four-digit numeric code.

      \n
    • \n
    • \n

      \n CREDIT_DEBIT_CARD_EXPIRY\n

      \n

      The expiration date for a credit or debit card. This number is usually four digits\n long and is often formatted as month/year or\n MM/YY. AI Guardrail recognizes expiration dates such as\n 01/21, 01/2021, and Jan\n 2021.

      \n
    • \n
    • \n

      \n CREDIT_DEBIT_CARD_NUMBER\n

      \n

      The number for a credit or debit card. These numbers can vary from 13 to 16 digits\n in length. However, Amazon Comprehend also recognizes credit or debit card numbers\n when only the last four digits are present.

      \n
    • \n
    • \n

      \n PIN\n

      \n

      A four-digit personal identification number (PIN) with which you can access your\n bank account.

      \n
    • \n
    • \n

      \n INTERNATIONAL_BANK_ACCOUNT_NUMBER\n

      \n

      An International Bank Account Number has specific formats in each country. For\n more information, see \n www.iban.com/structure.

      \n
    • \n
    • \n

      \n SWIFT_CODE\n

      \n

      A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a\n particular bank or branch. Banks use these codes for money transfers such as\n international wire transfers.

      \n

      SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to\n specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer\n to the head or primary office.

      \n
    • \n
    \n
  • \n
  • \n

    \n IT\n

    \n
      \n
    • \n

      \n IP_ADDRESS\n

      \n

      An IPv4 address, such as 198.51.100.0.

      \n
    • \n
    • \n

      \n MAC_ADDRESS\n

      \n

      A media access control (MAC) address is a unique identifier\n assigned to a network interface controller (NIC).

      \n
    • \n
    • \n

      \n URL\n

      \n

      A web address, such as www.example.com.

      \n
    • \n
    • \n

      \n AWS_ACCESS_KEY\n

      \n

      A unique identifier that's associated with a secret access key; you use the access\n key ID and secret access key to sign programmatic Amazon Web Services requests\n cryptographically.

      \n
    • \n
    • \n

      \n AWS_SECRET_KEY\n

      \n

      A unique identifier that's associated with an access key. You use the access key\n ID and secret access key to sign programmatic Amazon Web Services requests\n cryptographically.

      \n
    • \n
    \n
  • \n
  • \n

    \n USA specific\n

    \n
      \n
    • \n

      \n US_BANK_ACCOUNT_NUMBER\n

      \n

      A US bank account number, which is typically 10 to 12 digits long.

      \n
    • \n
    • \n

      \n US_BANK_ROUTING_NUMBER\n

      \n

      A US bank account routing number. These are typically nine digits long,

      \n
    • \n
    • \n

      \n US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER\n

      \n

      A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that\n starts with a \"9\" and contain a \"7\" or \"8\" as the fourth digit. An ITIN can be\n formatted with a space or a dash after the third and forth digits.

      \n
    • \n
    • \n

      \n US_PASSPORT_NUMBER\n

      \n

      A US passport number. Passport numbers range from six to nine alphanumeric\n characters.

      \n
    • \n
    • \n

      \n US_SOCIAL_SECURITY_NUMBER\n

      \n

      A US Social Security Number (SSN) is a nine-digit number that is issued to US\n citizens, permanent residents, and temporary working residents.

      \n
    • \n
    \n
  • \n
  • \n

    \n Canada specific\n

    \n
      \n
    • \n

      \n CA_HEALTH_NUMBER\n

      \n

      A Canadian Health Service Number is a 10-digit unique identifier, required for\n individuals to access healthcare benefits.

      \n
    • \n
    • \n

      \n CA_SOCIAL_INSURANCE_NUMBER\n

      \n

      A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier,\n required for individuals to access government programs and benefits.

      \n

      The SIN is formatted as three groups of three digits, such as \n 123-456-789. A SIN can be validated through a simple check-digit process\n called the Luhn\n algorithm .

      \n
    • \n
    \n
  • \n
  • \n

    \n UK Specific\n

    \n
      \n
    • \n

      \n UK_NATIONAL_HEALTH_SERVICE_NUMBER\n

      \n

      A UK National Health Service Number is a 10-17 digit number, such as 485\n 555 3456. The current system formats the 10-digit number with spaces\n after the third and sixth digits. The final digit is an error-detecting\n checksum.

      \n
    • \n
    • \n

      \n UK_NATIONAL_INSURANCE_NUMBER\n

      \n

      A UK National Insurance Number (NINO) provides individuals with access to National\n Insurance (social security) benefits. It is also used for some purposes in the UK tax\n system.

      \n

      The number is nine digits long and starts with two letters, followed by six\n numbers and one letter. A NINO can be formatted with a space or a dash after the two\n letters and after the second, forth, and sixth digits.

      \n
    • \n
    • \n

      \n UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER\n

      \n

      A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a\n taxpayer or a business.

      \n
    • \n
    \n
  • \n
  • \n

    \n Custom\n

    \n
      \n
    • \n

      \n Regex filter - You can use a regular expressions to\n define patterns for an AI Guardrail to recognize and act upon such as serial number,\n booking ID etc..

      \n
    • \n
    \n
  • \n
", "smithy.api#required": {} } }, @@ -10442,6 +10448,12 @@ "traits": { "smithy.api#documentation": "

The association configurations for overriding behavior on this AI Agent.

" } + }, + "locale": { + "target": "com.amazonaws.qconnect#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The locale to which specifies the language and region settings that determine the response\n language for QueryAssistant.

" + } } }, "traits": { diff --git a/models/quicksight.json b/models/quicksight.json index c949cbe8c3..f8962fea7d 100644 --- a/models/quicksight.json +++ b/models/quicksight.json @@ -9141,6 +9141,12 @@ "traits": { "smithy.api#documentation": "

When you create the dataset, Amazon QuickSight adds the dataset to these folders.

" } + }, + "PerformanceConfiguration": { + "target": "com.amazonaws.quicksight#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.

" + } } }, "traits": { @@ -13418,6 +13424,12 @@ "traits": { "smithy.api#documentation": "

The parameters that are declared in a dataset.

" } + }, + "PerformanceConfiguration": { + "target": "com.amazonaws.quicksight#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The performance optimization configuration of a dataset.

" + } } }, "traits": { @@ -24169,6 +24181,23 @@ "smithy.api#documentation": "

The configuration of destination parameter values.

\n

This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

" } }, + "com.amazonaws.quicksight#DigitGroupingStyle": { + "type": "enum", + "members": { + "DEFAULT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEFAULT" + } + }, + "LAKHS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LAKHS" + } + } + } + }, "com.amazonaws.quicksight#DimensionField": { "type": "structure", "members": { @@ -39020,6 +39049,18 @@ "traits": { "smithy.api#enumValue": "TRILLIONS" } + }, + "LAKHS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LAKHS" + } + }, + "CRORES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CRORES" + } } } }, @@ -40442,6 +40483,20 @@ } } }, + "com.amazonaws.quicksight#PerformanceConfiguration": { + "type": "structure", + "members": { + "UniqueKeys": { + "target": "com.amazonaws.quicksight#UniqueKeyList", + "traits": { + "smithy.api#documentation": "

A UniqueKey configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.

" + } + }, "com.amazonaws.quicksight#PeriodOverPeriodComputation": { "type": "structure", "members": { @@ -51602,7 +51657,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 100 + "max": 201 } } }, @@ -51945,11 +52000,23 @@ } } }, + "com.amazonaws.quicksight#TableUnaggregatedFieldList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#UnaggregatedField" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 201 + } + } + }, "com.amazonaws.quicksight#TableUnaggregatedFieldWells": { "type": "structure", "members": { "Values": { - "target": "com.amazonaws.quicksight#UnaggregatedFieldList", + "target": "com.amazonaws.quicksight#TableUnaggregatedFieldList", "traits": { "smithy.api#documentation": "

The values field well for a pivot table. Values are unaggregated for an unaggregated table.

" } @@ -53174,6 +53241,12 @@ "traits": { "smithy.api#documentation": "

Determines the visibility of the thousands separator.

" } + }, + "GroupingStyle": { + "target": "com.amazonaws.quicksight#DigitGroupingStyle", + "traits": { + "smithy.api#documentation": "

Determines the way numbers are styled to accommodate different readability standards. The DEFAULT value uses the standard international grouping system and groups numbers by the thousands. The LAKHS value uses the Indian numbering system and groups numbers by lakhs and crores.

" + } } }, "traits": { @@ -56274,6 +56347,45 @@ "smithy.api#pattern": "^[^\\u0000-\\u00FF]$" } }, + "com.amazonaws.quicksight#UniqueKey": { + "type": "structure", + "members": { + "ColumnNames": { + "target": "com.amazonaws.quicksight#UniqueKeyColumnNameList", + "traits": { + "smithy.api#documentation": "

The name of the column that is referenced in the UniqueKey configuration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A UniqueKey configuration that references a dataset column.

" + } + }, + "com.amazonaws.quicksight#UniqueKeyColumnNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#ColumnName" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.quicksight#UniqueKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#UniqueKey" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, "com.amazonaws.quicksight#UniqueValuesComputation": { "type": "structure", "members": { @@ -58187,6 +58299,12 @@ "traits": { "smithy.api#documentation": "

The parameter declarations of the dataset.

" } + }, + "PerformanceConfiguration": { + "target": "com.amazonaws.quicksight#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.

" + } } }, "traits": { diff --git a/models/rds.json b/models/rds.json index 7906ba4e51..0924f5cd5b 100644 --- a/models/rds.json +++ b/models/rds.json @@ -2906,13 +2906,13 @@ "EnableLogTypes": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "

The list of log types to enable.

" + "smithy.api#documentation": "

The list of log types to enable.

\n

The following values are valid for each DB engine:

\n
    \n
  • \n

    Aurora MySQL - audit | error | general | slowquery\n

    \n
  • \n
  • \n

    Aurora PostgreSQL - postgresql\n

    \n
  • \n
  • \n

    RDS for MySQL - error | general | slowquery\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql | upgrade\n

    \n
  • \n
" } }, "DisableLogTypes": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "

The list of log types to disable.

" + "smithy.api#documentation": "

The list of log types to disable.

\n

The following values are valid for each DB engine:

\n
    \n
  • \n

    Aurora MySQL - audit | error | general | slowquery\n

    \n
  • \n
  • \n

    Aurora PostgreSQL - postgresql\n

    \n
  • \n
  • \n

    RDS for MySQL - error | general | slowquery\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql | upgrade\n

    \n
  • \n
" } } }, @@ -4402,7 +4402,7 @@ "EnableHttpEndpoint": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint \n isn't enabled.

\n

When enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running\n SQL queries on the DB cluster. You can also query your database\n from inside the RDS console with the RDS query editor.

\n

RDS Data API is supported with the following DB clusters:

\n
    \n
  • \n

    Aurora PostgreSQL Serverless v2 and provisioned

    \n
  • \n
  • \n

    Aurora PostgreSQL and Aurora MySQL Serverless v1

    \n
  • \n
\n

For more information, see Using RDS Data API in the \n Amazon Aurora User Guide.

\n

Valid for Cluster Type: Aurora DB clusters only

" + "smithy.api#documentation": "

Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint \n isn't enabled.

\n

When enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running\n SQL queries on the DB cluster. You can also query your database\n from inside the RDS console with the RDS query editor.

\n

For more information, see Using RDS Data API in the \n Amazon Aurora User Guide.

\n

Valid for Cluster Type: Aurora DB clusters only

" } }, "CopyTagsToSnapshot": { @@ -4462,43 +4462,43 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. \n By default, minor engine upgrades are applied automatically.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. \n By default, minor engine upgrades are applied automatically.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB cluster

" } }, "MonitoringInterval": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off \n collecting Enhanced Monitoring metrics, specify 0.

\n

If MonitoringRoleArn is specified, also set MonitoringInterval\n to a value other than 0.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

\n

Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60\n

\n

Default: 0\n

" + "smithy.api#documentation": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off \n collecting Enhanced Monitoring metrics, specify 0.

\n

If MonitoringRoleArn is specified, also set MonitoringInterval\n to a value other than 0.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

\n

Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60\n

\n

Default: 0\n

" } }, "MonitoringRoleArn": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. \n An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role,\n see Setting \n up and enabling Enhanced Monitoring in the Amazon RDS User Guide.

\n

If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. \n An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role,\n see Setting \n up and enabling Enhanced Monitoring in the Amazon RDS User Guide.

\n

If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" } }, "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the cluster.

" + "smithy.api#documentation": "

The mode of Database Insights to enable for the DB cluster.

\n

If you set this value to advanced, you must also set the PerformanceInsightsEnabled\n parameter to true and the PerformanceInsightsRetentionPeriod parameter to 465.

\n

Valid for Cluster Type: Aurora DB clusters only

" } }, "EnablePerformanceInsights": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether to turn on Performance Insights for the DB cluster.

\n

For more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

Specifies whether to turn on Performance Insights for the DB cluster.

\n

For more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" } }, "PerformanceInsightsKMSKeyId": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS \n uses your default KMS key. There is a default KMS key for your Amazon Web Services account. \n Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS \n uses your default KMS key. There is a default KMS key for your Amazon Web Services account. \n Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" } }, "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

\n

If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

" + "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

\n

If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

" } }, "EnableLimitlessDatabase": { @@ -5232,7 +5232,7 @@ "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the instance.

" + "smithy.api#documentation": "

The mode of Database Insights to enable for the DB instance.

\n

This setting only applies to Amazon Aurora DB instances.

\n \n

Currently, this value is inherited from the DB cluster and can't be changed.

\n
" } }, "EnablePerformanceInsights": { @@ -5591,7 +5591,7 @@ "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights.

" + "smithy.api#documentation": "

The mode of Database Insights to enable for the read replica.

\n \n

Currently, this setting is not supported.

\n
" } }, "EnablePerformanceInsights": { @@ -5711,7 +5711,7 @@ "AllocatedStorage": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the read replica.\n Follow the allocation rules specified in CreateDBInstance.

\n \n

Be sure to allocate enough storage for your read replica so that the create operation can succeed.\n You can also allocate additional storage for future growth.

\n
" + "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the read replica.\n Follow the allocation rules specified in CreateDBInstance.

\n

This setting isn't valid for RDS for SQL Server.

\n \n

Be sure to allocate enough storage for your read replica so that the create operation can succeed.\n You can also allocate additional storage for future growth.

\n
" } }, "SourceDBClusterIdentifier": { @@ -7528,43 +7528,43 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.rds#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether minor version patches are applied automatically.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "smithy.api#documentation": "

Indicates whether minor version patches are applied automatically.

\n

This setting is for Aurora DB clusters and Multi-AZ DB clusters.

" } }, "MonitoringInterval": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "smithy.api#documentation": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster.

\n

This setting is only for -Aurora DB clusters and Multi-AZ DB clusters.

" } }, "MonitoringRoleArn": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "smithy.api#documentation": "

The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs.

\n

This setting is only for Aurora DB clusters and Multi-AZ DB clusters.

" } }, "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

The mode of Database Insights that is enabled for the cluster.

" + "smithy.api#documentation": "

The mode of Database Insights that is enabled for the DB cluster.

" } }, "PerformanceInsightsEnabled": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Indicates whether Performance Insights is enabled for the DB cluster.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "smithy.api#documentation": "

Indicates whether Performance Insights is enabled for the DB cluster.

\n

This setting is only for Aurora DB clusters and Multi-AZ DB clusters.

" } }, "PerformanceInsightsKMSKeyId": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

This setting is only for Aurora DB clusters and Multi-AZ DB clusters.

" } }, "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

" + "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

This setting is only for Aurora DB clusters and Multi-AZ DB clusters.

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

" } }, "ServerlessV2ScalingConfiguration": { @@ -14071,13 +14071,13 @@ "Source": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

A specific source to return parameters for.

\n

Valid Values:

\n
    \n
  • \n

    \n customer\n

    \n
  • \n
  • \n

    \n engine\n

    \n
  • \n
  • \n

    \n service\n

    \n
  • \n
" + "smithy.api#documentation": "

A specific source to return parameters for.

\n

Valid Values:

\n
    \n
  • \n

    \n engine-default\n

    \n
  • \n
  • \n

    \n system\n

    \n
  • \n
  • \n

    \n user\n

    \n
  • \n
" } }, "Filters": { "target": "com.amazonaws.rds#FilterList", "traits": { - "smithy.api#documentation": "

This parameter isn't currently supported.

" + "smithy.api#documentation": "

A filter that specifies one or more DB cluster parameters to describe.

\n

The only supported filter is parameter-name. The results list only includes information about the DB cluster parameters with these names.

" } }, "MaxRecords": { @@ -15537,7 +15537,7 @@ "Filters": { "target": "com.amazonaws.rds#FilterList", "traits": { - "smithy.api#documentation": "

This parameter isn't currently supported.

" + "smithy.api#documentation": "

A filter that specifies one or more DB parameters to describe.

\n

The only supported filter is parameter-name. The results list only includes information about the DB parameters with these names.

" } }, "MaxRecords": { @@ -16735,7 +16735,7 @@ "Filters": { "target": "com.amazonaws.rds#FilterList", "traits": { - "smithy.api#documentation": "

This parameter isn't currently supported.

" + "smithy.api#documentation": "

A filter that specifies one or more parameters to describe.

\n

The only supported filter is parameter-name. The results list only includes information about the parameters with these names.

" } }, "MaxRecords": { @@ -21871,7 +21871,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. \n By default, minor engine upgrades are applied automatically.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. \n By default, minor engine upgrades are applied automatically.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" } }, "MonitoringInterval": { @@ -21889,7 +21889,7 @@ "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the cluster.

" + "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the DB cluster.

\n

If you change the value from standard to advanced, you must set the \n PerformanceInsightsEnabled parameter to true and the \n PerformanceInsightsRetentionPeriod parameter to 465.

\n

If you change the value from advanced to standard, you must \n set the PerformanceInsightsEnabled parameter to false.

\n

Valid for Cluster Type: Aurora DB clusters only

" } }, "EnablePerformanceInsights": { @@ -21901,13 +21901,13 @@ "PerformanceInsightsKMSKeyId": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS \n uses your default KMS key. There is a default KMS key for your Amazon Web Services account. \n Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS \n uses your default KMS key. There is a default KMS key for your Amazon Web Services account. \n Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" } }, "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

\n

If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

" + "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

\n

If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

" } }, "ServerlessV2ScalingConfiguration": { @@ -22388,7 +22388,7 @@ "Iops": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The new Provisioned IOPS (I/O operations per second) value for the RDS instance.

\n

Changing this setting doesn't result in an outage and\n the change is applied during the next maintenance window\n unless the ApplyImmediately parameter is enabled for this request.\n If you are migrating from Provisioned IOPS to standard storage, set this value to 0. \n The DB instance will require a reboot for the change in storage type to take effect.

\n

If you choose to migrate your DB instance from using standard storage to using\n Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process\n can take time. The duration of the migration depends on several factors such as database\n load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS\n provisioned (if any), and the number of prior scale storage operations. Typical\n migration times are under 24 hours, but the process can take up to several days in some\n cases. During the migration, the DB instance is available for use, but might experience\n performance degradation. While the migration takes place, nightly backups for the\n instance are suspended. No other Amazon RDS operations can take place for the instance,\n including modifying the instance, rebooting the instance, deleting the instance,\n creating a read replica for the instance, and creating a DB snapshot of the instance.

\n

Constraints:

\n
    \n
  • \n

    For RDS for MariaDB, RDS for MySQL, RDS for Oracle, and RDS for PostgreSQL - The value supplied must be at least 10% greater than the current value. \n Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    \n
  • \n
  • \n

    When you increase the Provisioned IOPS, you must also specify the\n AllocatedStorage parameter. You can use the current value for\n AllocatedStorage.

    \n
  • \n
\n

Default: Uses existing setting

" + "smithy.api#documentation": "

The new Provisioned IOPS (I/O operations per second) value for the RDS instance.

\n

Changing this setting doesn't result in an outage and\n the change is applied during the next maintenance window\n unless the ApplyImmediately parameter is enabled for this request.\n If you are migrating from Provisioned IOPS to standard storage, set this value to 0. \n The DB instance will require a reboot for the change in storage type to take effect.

\n

If you choose to migrate your DB instance from using standard storage to Provisioned\n IOPS (io1), or from Provisioned IOPS to standard storage, the process can take time. The\n duration of the migration depends on several factors such as database load, storage\n size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any),\n and the number of prior scale storage operations. Typical migration times are under 24\n hours, but the process can take up to several days in some cases. During the migration,\n the DB instance is available for use, but might experience performance degradation.\n While the migration takes place, nightly backups for the instance are suspended. No\n other Amazon RDS operations can take place for the instance, including modifying the\n instance, rebooting the instance, deleting the instance, creating a read replica for the\n instance, and creating a DB snapshot of the instance.

\n

\n

Constraints:

\n
    \n
  • \n

    For RDS for MariaDB, RDS for MySQL, RDS for Oracle, and RDS for PostgreSQL - The value supplied must be at least 10% greater than the current value. \n Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.

    \n
  • \n
  • \n

    When you increase the Provisioned IOPS, you must also specify the\n AllocatedStorage parameter. You can use the current value for\n AllocatedStorage.

    \n
  • \n
\n

Default: Uses existing setting

" } }, "OptionGroupName": { @@ -22406,7 +22406,7 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The storage type to associate with the DB instance.

\n

If you specify io1, io2, or gp3 \n you must also include a value for the Iops parameter.

\n

If you choose to migrate your DB instance from using standard storage to using\n Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process\n can take time. The duration of the migration depends on several factors such as database\n load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS\n provisioned (if any), and the number of prior scale storage operations. Typical\n migration times are under 24 hours, but the process can take up to several days in some\n cases. During the migration, the DB instance is available for use, but might experience\n performance degradation. While the migration takes place, nightly backups for the\n instance are suspended. No other Amazon RDS operations can take place for the instance,\n including modifying the instance, rebooting the instance, deleting the instance,\n creating a read replica for the instance, and creating a DB snapshot of the instance.

\n

Valid Values: gp2 | gp3 | io1 | io2 | standard\n

\n

Default: io1, if the Iops parameter\n is specified. Otherwise, gp2.

" + "smithy.api#documentation": "

The storage type to associate with the DB instance.

\n

If you specify io1, io2, or gp3 \n you must also include a value for the Iops parameter.

\n

If you choose to migrate your DB instance from using standard storage to gp2 (General\n Purpose SSD), gp3, or Provisioned IOPS (io1), or from these storage types to standard\n storage, the process can take time. The duration of the migration depends on several\n factors such as database load, storage size, storage type (standard or Provisioned\n IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage\n operations. Typical migration times are under 24 hours, but the process can take up to\n several days in some cases. During the migration, the DB instance is available for use,\n but might experience performance degradation. While the migration takes place, nightly\n backups for the instance are suspended. No other Amazon RDS operations can take place\n for the instance, including modifying the instance, rebooting the instance, deleting the\n instance, creating a read replica for the instance, and creating a DB snapshot of the\n instance.

\n

Valid Values: gp2 | gp3 | io1 | io2 | standard\n

\n

Default: io1, if the Iops parameter\n is specified. Otherwise, gp2.

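The 10% rounding rule above has a concrete effect that is easy to check locally. A small illustrative helper, under the stated assumption that RDS rounds a too-small request up to exactly 10% above the current value; names are ours, not SDK API:

// Requested IOPS below current * 1.1 are rounded up to current * 1.1.
func effectiveIops(requested: Int, current: Int) -> Int {
    let minimum = Int((Double(current) * 1.1).rounded(.up))
    return max(requested, minimum)
}

print(effectiveIops(requested: 1050, current: 1000)) // 1100 - rounded up to 10% greater
print(effectiveIops(requested: 2000, current: 1000)) // 2000 - already at least 10% greater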
" } }, "TdeCredentialArn": { @@ -22514,7 +22514,7 @@ "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the instance.

" + "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the DB instance.

\n

This setting only applies to Amazon Aurora DB instances.

\n \n

Currently, this value is inherited from the DB cluster and can't be changed.

\n
" } }, "EnablePerformanceInsights": { @@ -22538,7 +22538,7 @@ "CloudwatchLogsExportConfiguration": { "target": "com.amazonaws.rds#CloudwatchLogsExportConfiguration", "traits": { - "smithy.api#documentation": "

The log types to be enabled for export to CloudWatch Logs for a \n specific DB instance.

\n

A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance \n immediately. Therefore, the ApplyImmediately parameter has no effect.

\n

This setting doesn't apply to RDS Custom DB instances.

" + "smithy.api#documentation": "

The log types to be enabled for export to CloudWatch Logs for a \n specific DB instance.

\n

A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance \n immediately. Therefore, the ApplyImmediately parameter has no effect.

\n

This setting doesn't apply to RDS Custom DB instances.

\n

The following values are valid for each DB engine:

\n
    \n
  • \n

    Aurora MySQL - audit | error | general | slowquery\n

    \n
  • \n
  • \n

    Aurora PostgreSQL - postgresql\n

    \n
  • \n
  • \n

    RDS for MySQL - error | general | slowquery\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql | upgrade\n

    \n
  • \n
\n

For more information about exporting CloudWatch Logs for Amazon RDS, see \n Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

\n

For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

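As a usage sketch, the configuration above might be driven from Soto roughly as follows. This assumes the generated module follows Soto's usual naming pattern (SotoRDS, modifyDBInstance, CloudwatchLogsExportConfiguration); verify against the generated sources before relying on it:

import SotoRDS

// Hedged sketch: enable error and slow-query log export for an RDS for MySQL
// instance. Per the documentation above, this change is applied immediately
// regardless of ApplyImmediately.
func enableLogExports(rds: RDS, instanceId: String) async throws {
    let config = RDS.CloudwatchLogsExportConfiguration(
        enableLogTypes: ["error", "slowquery"]  // valid values for RDS for MySQL
    )
    _ = try await rds.modifyDBInstance(
        .init(
            cloudwatchLogsExportConfiguration: config,
            dbInstanceIdentifier: instanceId
        )
    )
}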
" } }, "ProcessorFeatures": { @@ -27838,7 +27838,7 @@ "EngineMode": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The engine mode of the new cluster. Specify provisioned or serverless,\n depending on the type of the cluster you are creating. You can create an Aurora Serverless v1 clone\n from a provisioned cluster, or a provisioned clone from an Aurora Serverless v1 cluster. To create a clone\n that is an Aurora Serverless v1 cluster, the original cluster must be an Aurora Serverless v1 cluster or\n an encrypted provisioned cluster.

\n

Valid for: Aurora DB clusters only

" + "smithy.api#documentation": "

The engine mode of the new cluster. Specify provisioned or serverless,\n depending on the type of the cluster you are creating. You can create an Aurora Serverless v1 clone\n from a provisioned cluster, or a provisioned clone from an Aurora Serverless v1 cluster. To create a clone\n that is an Aurora Serverless v1 cluster, the original cluster must be an Aurora Serverless v1 cluster or\n an encrypted provisioned cluster. To create a full copy that is an Aurora Serverless v1 cluster, specify \n the engine mode serverless.

\n

Valid for: Aurora DB clusters only

" } }, "DBClusterInstanceClass": { @@ -28289,7 +28289,7 @@ "AllocatedStorage": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in\n CreateDBInstance.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also\n allocate additional storage for future growth.

\n
" + "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in\n CreateDBInstance.

\n

This setting isn't valid for RDS for SQL Server.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also\n allocate additional storage for future growth.

\n
" } }, "DedicatedLogVolume": { @@ -28418,7 +28418,7 @@ "AllocatedStorage": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance.\n Follow the allocation rules specified in CreateDBInstance.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed.\n You can also allocate additional storage for future growth.

\n
" + "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance.\n Follow the allocation rules specified in CreateDBInstance.

\n

This setting isn't valid for RDS for SQL Server.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed.\n You can also allocate additional storage for future growth.

\n
" } }, "DBInstanceClass": { @@ -28634,7 +28634,7 @@ "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the instance.

" + "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the DB instance.

\n

This setting only applies to Amazon Aurora DB instances.

\n \n

Currently, this value is inherited from the DB cluster and can't be changed.

\n
" } }, "EnablePerformanceInsights": { @@ -29179,7 +29179,7 @@ "AllocatedStorage": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance.\n Follow the allocation rules specified in CreateDBInstance.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed.\n You can also allocate additional storage for future growth.

\n
" + "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance.\n Follow the allocation rules specified in CreateDBInstance.

\n

This setting isn't valid for RDS for SQL Server.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed.\n You can also allocate additional storage for future growth.

\n
" } }, "DedicatedLogVolume": { diff --git a/models/redshift.json b/models/redshift.json index 428ffd004a..1a812f4665 100644 --- a/models/redshift.json +++ b/models/redshift.json @@ -1337,7 +1337,7 @@ "PubliclyAccessible": { "target": "com.amazonaws.redshift#Boolean", "traits": { - "smithy.api#documentation": "

A boolean value that, if true, indicates that the cluster can be\n accessed from a public network.

" + "smithy.api#documentation": "

A boolean value that, if true, indicates that the cluster can be\n accessed from a public network.

\n

Default: false

" } }, "Encrypted": { @@ -2919,13 +2919,13 @@ "PubliclyAccessible": { "target": "com.amazonaws.redshift#BooleanOptional", "traits": { - "smithy.api#documentation": "

If true, the cluster can be accessed from a public network.

" + "smithy.api#documentation": "

If true, the cluster can be accessed from a public network.

\n

Default: false

" } }, "Encrypted": { "target": "com.amazonaws.redshift#BooleanOptional", "traits": { - "smithy.api#documentation": "

If true, the data in the cluster is encrypted at rest.

\n

Default: false

" + "smithy.api#documentation": "

If true, the data in the cluster is encrypted at rest. \n If you set the value on this parameter to false, the request will fail.

\n

Default: true

" } }, "HsmClientCertificateIdentifier": { @@ -12156,7 +12156,7 @@ "PubliclyAccessible": { "target": "com.amazonaws.redshift#BooleanOptional", "traits": { - "smithy.api#documentation": "

If true, the cluster can be accessed from a public network. Only\n clusters in VPCs can be set to be publicly available.

" + "smithy.api#documentation": "

If true, the cluster can be accessed from a public network. Only\n clusters in VPCs can be set to be publicly available.

\n

Default: false

" } }, "ElasticIp": { @@ -17035,7 +17035,7 @@ "PubliclyAccessible": { "target": "com.amazonaws.redshift#BooleanOptional", "traits": { - "smithy.api#documentation": "

If true, the cluster can be accessed from a public network.

" + "smithy.api#documentation": "

If true, the cluster can be accessed from a public network.

\n

Default: false

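Given the changed Encrypted semantics above (default true, explicit false rejected), a hedged sketch of a compatible CreateCluster call. Method and member names assume Soto's usual generation pattern (SotoRedshift, createCluster) and the identifiers are placeholders; verify against the generated module:

import SotoRedshift

// Hedged sketch: leave encrypted nil or pass true; per the updated
// documentation, a request that sets it to false will fail.
func createEncryptedCluster(redshift: Redshift) async throws {
    _ = try await redshift.createCluster(
        .init(
            clusterIdentifier: "analytics-cluster",   // example identifier
            encrypted: true,                          // false would be rejected
            masterUsername: "admin",
            masterUserPassword: "REPLACE_ME",         // placeholder
            nodeType: "ra3.xlplus"
        )
    )
}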
" } }, "OwnerAccount": { diff --git a/models/resiliencehub.json b/models/resiliencehub.json index e2fc7be896..18f132ed97 100644 --- a/models/resiliencehub.json +++ b/models/resiliencehub.json @@ -225,6 +225,26 @@ } } }, + "com.amazonaws.resiliencehub#Alarm": { + "type": "structure", + "members": { + "alarmArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Amazon CloudWatch alarm.

" + } + }, + "source": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the source of the Amazon CloudWatch alarm. That is, it indicates if the\n alarm was created using Resilience Hub recommendation (AwsResilienceHub),\n or if you had created the alarm in Amazon CloudWatch (Customer).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates the Amazon CloudWatch alarm detected while running an assessment.

" + } + }, "com.amazonaws.resiliencehub#AlarmRecommendation": { "type": "structure", "members": { @@ -683,7 +703,7 @@ "complianceStatus": { "target": "com.amazonaws.resiliencehub#ComplianceStatus", "traits": { - "smithy.api#documentation": "

Current\n status of compliance for the resiliency policy.

" + "smithy.api#documentation": "

Current status of compliance for the resiliency policy.

" } }, "cost": { @@ -1146,7 +1166,7 @@ "appComponents": { "target": "com.amazonaws.resiliencehub#AppComponentNameList", "traits": { - "smithy.api#documentation": "

Indicates the Application Components (AppComponents) that were assessed as part of the\n assessnent and are associated with the identified risk and recommendation.

\n \n

This property is available only in the US East (N. Virginia) Region.

\n
" + "smithy.api#documentation": "

Indicates the Application Components (AppComponents) that were assessed as part of the\n assessment and are associated with the identified risk and recommendation.

\n \n

This property is available only in the US East (N. Virginia) Region.

\n
" } } }, @@ -2465,6 +2485,12 @@ "smithy.api#required": {} } }, + "appComponentId": { + "target": "com.amazonaws.resiliencehub#EntityName255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of an AppComponent.

" + } + }, "excludeReason": { "target": "com.amazonaws.resiliencehub#ExcludeRecommendationReason", "traits": { @@ -2549,7 +2575,7 @@ "diffType": { "target": "com.amazonaws.resiliencehub#DifferenceType", "traits": { - "smithy.api#documentation": "

Difference type between actual and expected recovery point objective (RPO) and recovery\n time objective (RTO) values. Currently, Resilience Hub supports only\n NotEqual difference type.

" + "smithy.api#documentation": "

Difference type between actual and expected recovery point objective (RPO) and recovery\n time objective (RTO) values. Currently, Resilience Hub supports only\n NotEqual difference type.

" } } }, @@ -5422,6 +5448,26 @@ } } }, + "com.amazonaws.resiliencehub#Experiment": { + "type": "structure", + "members": { + "experimentArn": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the FIS experiment.

" + } + }, + "experimentTemplateId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Identifier of the FIS experiment template.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates the FIS experiment detected while running an assessment.

" + } + }, "com.amazonaws.resiliencehub#FailedGroupingRecommendationEntries": { "type": "list", "member": { @@ -7987,7 +8033,7 @@ "invokerRoleName": { "target": "com.amazonaws.resiliencehub#IamRoleName", "traits": { - "smithy.api#documentation": "

Existing Amazon Web Services\n IAM role name in the primary Amazon Web Services account that will be assumed by\n Resilience Hub Service Principle to obtain a read-only access to your application\n resources while running an assessment.

\n \n
    \n
  • \n

    You must have iam:passRole permission for this role while creating or\n updating the application.

    \n
  • \n
  • \n

    Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-]\n characters.

    \n
  • \n
\n
" + "smithy.api#documentation": "

Existing Amazon Web Services\n IAM role name in the primary Amazon Web Services account that will be assumed by\n Resilience Hub Service Principle to obtain a read-only access to your application\n resources while running an assessment.

\n

If your IAM role includes a path, you must include the path in the invokerRoleName parameter. \n For example, if your IAM role's ARN is arn:aws:iam:123456789012:role/my-path/role-name, you should pass my-path/role-name.\n

\n \n
    \n
  • \n

    You must have iam:passRole permission for this role while creating or\n updating the application.

    \n
  • \n
  • \n

    Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-]\n characters.

    \n
  • \n
\n
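The path rule above is mechanical, so a small helper can derive the value. A sketch in plain Swift; the function is ours, for illustration only:

// Derive the invokerRoleName value from an IAM role ARN, keeping the path:
// "arn:aws:iam::123456789012:role/my-path/role-name" -> "my-path/role-name"
func invokerRoleName(fromRoleArn arn: String) -> String? {
    guard let range = arn.range(of: ":role/") else { return nil }
    return String(arn[range.upperBound...])
}

assert(invokerRoleName(fromRoleArn: "arn:aws:iam::123456789012:role/my-path/role-name") == "my-path/role-name")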
" } }, "crossAccountRoleArns": { @@ -8427,6 +8473,18 @@ "traits": { "smithy.api#documentation": "

Indicates the reason for excluding an operational recommendation.

" } + }, + "latestDiscoveredExperiment": { + "target": "com.amazonaws.resiliencehub#Experiment", + "traits": { + "smithy.api#documentation": "

Indicates the experiment created in FIS that was discovered by Resilience Hub, which matches the recommendation.

" + } + }, + "discoveredAlarm": { + "target": "com.amazonaws.resiliencehub#Alarm", + "traits": { + "smithy.api#documentation": "

Indicates the previously implemented Amazon CloudWatch alarm discovered by Resilience Hub.

" + } } }, "traits": { @@ -9205,7 +9263,7 @@ "hasMoreErrors": { "target": "com.amazonaws.resiliencehub#BooleanOptional", "traits": { - "smithy.api#documentation": "

This indicates if there are more errors not listed in the\n resourceErrors\n list.

" + "smithy.api#documentation": "

This indicates if there are more errors not listed in the resourceErrors\n list.

" } } }, @@ -10204,6 +10262,12 @@ "smithy.api#required": {} } }, + "appComponentId": { + "target": "com.amazonaws.resiliencehub#EntityName255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of the AppComponent.

" + } + }, "appComponentName": { "target": "com.amazonaws.resiliencehub#EntityId", "traits": { @@ -10924,6 +10988,12 @@ "smithy.api#required": {} } }, + "appComponentId": { + "target": "com.amazonaws.resiliencehub#EntityName255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of the AppComponent.

" + } + }, "excludeReason": { "target": "com.amazonaws.resiliencehub#ExcludeRecommendationReason", "traits": { diff --git a/models/route-53-domains.json b/models/route-53-domains.json index 01bf154947..f6f9ae8637 100644 --- a/models/route-53-domains.json +++ b/models/route-53-domains.json @@ -199,7 +199,7 @@ "InvoiceId": { "target": "com.amazonaws.route53domains#InvoiceId", "traits": { - "smithy.api#documentation": "

The ID of the invoice that is associated with the billing record.

" + "smithy.api#documentation": "

Deprecated property. This field is retained in report structure for backwards compatibility, but will appear blank.

" } }, "BillDate": { diff --git a/models/route-53.json b/models/route-53.json index 205ae35f0f..92f3b785e0 100644 --- a/models/route-53.json +++ b/models/route-53.json @@ -3449,6 +3449,18 @@ "traits": { "smithy.api#enumValue": "ap-southeast-5" } + }, + "mx_central_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "mx-central-1" + } + }, + "ap_southeast_7": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ap-southeast-7" + } } }, "traits": { @@ -10884,6 +10896,18 @@ "traits": { "smithy.api#enumValue": "ap-southeast-5" } + }, + "mx_central_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "mx-central-1" + } + }, + "ap_southeast_7": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ap-southeast-7" + } } }, "traits": { @@ -12518,6 +12542,18 @@ "traits": { "smithy.api#enumValue": "ap-southeast-5" } + }, + "mx_central_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "mx-central-1" + } + }, + "ap_southeast_7": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ap-southeast-7" + } } }, "traits": { diff --git a/models/s3.json b/models/s3.json index e90ce50823..faff31ddab 100644 --- a/models/s3.json +++ b/models/s3.json @@ -19163,25 +19163,37 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only be present if the checksum was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present if the checksum was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + } + }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

The Base64 encoded, 64-bit CRC-64NVME checksum of the object. This checksum is present\n if the object was uploaded with the CRC-64NVME checksum algorithm, or if the object was uploaded without a\n checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be present if the object was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be present if the object was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + } + }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

The checksum type that is used to calculate the object’s\n checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } } }, @@ -19215,6 +19227,12 @@ "traits": { "smithy.api#enumValue": "SHA256" } + }, + "CRC64NVME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CRC64NVME" + } } } }, @@ -19230,6 +19248,9 @@ "com.amazonaws.s3#ChecksumCRC32C": { "type": "string" }, + "com.amazonaws.s3#ChecksumCRC64NVME": { + "type": "string" + }, "com.amazonaws.s3#ChecksumMode": { "type": "enum", "members": { @@ -19247,6 +19268,23 @@ "com.amazonaws.s3#ChecksumSHA256": { "type": "string" }, + "com.amazonaws.s3#ChecksumType": { + "type": "enum", + "members": { + "COMPOSITE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPOSITE" + } + }, + "FULL_OBJECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FULL_OBJECT" + } + } + } + }, "com.amazonaws.s3#Code": { "type": "string" }, @@ -19356,25 +19394,37 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only be present if the checksum was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present if the checksum was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + } + }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This header specifies the Base64 encoded, 64-bit\n CRC-64NVME checksum of the object. The CRC-64NVME checksum is\n always a full object checksum. For more information, see Checking object integrity\n in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be present if the object was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be present if the object was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + } + }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

The checksum type, which determines how part-level checksums are combined to create an\n object-level checksum for multipart objects. You can use this header as a data integrity\n check to verify that the checksum type that is received is the same checksum type that was\n specified during the CreateMultipartUpload request. For more information, see\n Checking object integrity\n in the Amazon S3 User Guide.

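The COMPOSITE type above is the documented checksum-of-checksums scheme: the Base64 decoded part-level checksums are concatenated and hashed again, with the part count appended after a dash; FULL_OBJECT types such as CRC-64NVME are instead computed over the whole byte stream. A hedged sketch for SHA-256 parts using swift-crypto (already a Soto dependency); the helper name and the exact suffix format are our reading of the documentation, not SDK API:

import Crypto
import Foundation

// Combine Base64 encoded part checksums into a COMPOSITE object checksum.
func compositeSHA256(partChecksumsBase64: [String]) -> String? {
    var concatenated = Data()
    for part in partChecksumsBase64 {
        guard let raw = Data(base64Encoded: part) else { return nil }
        concatenated.append(raw)  // raw 32-byte digest of each part
    }
    let digest = SHA256.hash(data: concatenated)
    // Documented form: checksum-of-checksums plus "-<number of parts>"
    return Data(digest).base64EncodedString() + "-\(partChecksumsBase64.count)"
}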
" } }, "ServerSideEncryption": { @@ -19461,31 +19511,52 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 32-bit CRC-32C checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This header specifies the Base64 encoded, 64-bit\n CRC-64NVME checksum of the object. The CRC-64NVME checksum is\n always a full object checksum. For more information, see Checking object integrity\n in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-crc64nvme" + } + }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha1" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha256" } }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

This header specifies the checksum type of the object, which determines how part-level\n checksums are combined to create an object-level checksum for multipart objects. You can\n use this header as a data integrity check to verify that the checksum type that is received\n is the same checksum that was specified. If the checksum type doesn’t match the checksum\n type that was specified for the object during the CreateMultipartUpload\n request, it’ll result in a BadDigest error. For more information, see Checking\n object integrity in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-type" + } + }, + "MpuObjectSize": { + "target": "com.amazonaws.s3#MpuObjectSize", + "traits": { + "smithy.api#documentation": "

The expected total object size of the multipart upload request. If there’s a mismatch\n between the specified object size value and the actual object size value, it results in an\n HTTP 400 InvalidRequest error.

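For the checksum headers above, the header value is the big-endian checksum bytes, Base64 encoded. A self-contained Swift sketch for CRC-32 (IEEE polynomial, reflected); a table-driven implementation would be faster for real workloads, but this shows the encoding that x-amz-checksum-crc32 expects:

import Foundation

// Compute the Base64 encoded CRC-32 value for an S3 checksum header.
func crc32Base64(_ data: Data) -> String {
    var crc: UInt32 = 0xFFFFFFFF
    for byte in data {
        crc ^= UInt32(byte)
        for _ in 0..<8 {
            crc = (crc & 1) == 1 ? (crc >> 1) ^ 0xEDB88320 : crc >> 1
        }
    }
    crc ^= 0xFFFFFFFF
    // S3 checksum headers carry the big-endian checksum bytes, Base64 encoded.
    let bytes = withUnsafeBytes(of: crc.bigEndian) { Data($0) }
    return bytes.base64EncodedString()
}

print(crc32Base64(Data("hello".utf8))) // "NhCmhg==" (CRC-32 of "hello" is 0x3610A686)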
", + "smithy.api#httpHeader": "x-amz-mp-object-size" + } + }, "RequestPayer": { "target": "com.amazonaws.s3#RequestPayer", "traits": { @@ -19567,25 +19638,31 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32 checksum of the part. This checksum is present\n if the multipart upload request was created with the CRC-32 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32C checksum of the part. This checksum is present\n if the multipart upload request was created with the CRC-32C checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.

" + } + }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

The Base64 encoded, 64-bit CRC-64NVME checksum of the part. This checksum is present\n if the multipart upload request was created with the CRC-64NVME checksum algorithm to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 160-bit SHA-1 checksum of the part. This checksum is present\n if the multipart upload request was created with the SHA-1 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 256-bit SHA-256 checksum of the part. This checksum is present\n if the multipart upload request was created with the SHA-256 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "PartNumber": { @@ -19785,7 +19862,7 @@ "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.

", + "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a Base64 encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, @@ -20028,7 +20105,7 @@ "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use\n for the destination object encryption. The value of this header is a base64-encoded UTF-8\n string holding JSON with the encryption context key-value pairs.

\n

\n General purpose buckets - This value must be explicitly\n added to specify encryption context for CopyObject requests if you want an\n additional encryption context for your destination object. The additional encryption\n context of the source object won't be copied to the destination object. For more\n information, see Encryption\n context in the Amazon S3 User Guide.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

", + "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use\n for the destination object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.

\n

\n General purpose buckets - This value must be explicitly\n added to specify encryption context for CopyObject requests if you want an\n additional encryption context for your destination object. The additional encryption\n context of the source object won't be copied to the destination object. For more\n information, see Encryption\n context in the Amazon S3 User Guide.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, @@ -20128,28 +20205,40 @@ "smithy.api#documentation": "

Creation date of the object.

" } }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

The checksum type that is used to calculate the object’s\n checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.

" + } + }, "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the object was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32C checksum of the object. This will only be present if the object was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" + } + }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

The Base64 encoded, 64-bit CRC-64NVME checksum of the object. This checksum is present\n if the object being copied was uploaded with the CRC-64NVME checksum algorithm, or if the object was uploaded without a\n checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be present if the object was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be present if the object was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" } } }, @@ -20175,25 +20264,31 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 32-bit CRC-32 checksum of the part. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 32-bit CRC-32C checksum of the part. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

" + } + }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

The Base64 encoded, 64-bit CRC-64NVME checksum of the part. This checksum is present\n if the multipart upload request was created with the CRC-64NVME checksum algorithm to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 160-bit SHA-1 checksum of the part. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 256-bit SHA-256 checksum of the part. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

" } } }, @@ -20204,7 +20299,7 @@ "com.amazonaws.s3#CopySource": { "type": "string", "traits": { - "smithy.api#pattern": "^\\/.+\\/.+$" + "smithy.api#pattern": "^\\/?.+\\/.+$" } }, "com.amazonaws.s3#CopySourceIfMatch": { @@ -20592,7 +20687,7 @@ "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.

", + "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, @@ -20615,6 +20710,13 @@ "smithy.api#documentation": "

The algorithm that was used to create a checksum of the object.

", "smithy.api#httpHeader": "x-amz-checksum-algorithm" } + }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

Indicates the checksum type that you want Amazon S3 to use to calculate the object’s\n checksum value. For more information, see Checking object integrity\n in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-type" + } } }, "traits": { @@ -20783,7 +20885,7 @@ "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

", + "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

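The relaxed CopySource pattern above makes the leading slash optional, so "bucket/key" and "/bucket/key" are both accepted. A quick client-side check in plain Swift mirroring the new pattern; the function name is ours, for illustration:

import Foundation

func isValidCopySource(_ source: String) -> Bool {
    source.range(of: #"^\/?.+\/.+$"#, options: .regularExpression) != nil
}

assert(isValidCopySource("my-bucket/photos/cat.png"))   // no leading slash: now valid
assert(isValidCopySource("/my-bucket/photos/cat.png"))  // leading slash: still valid
assert(!isValidCopySource("my-bucket"))                 // no key component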
", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, @@ -20841,6 +20943,13 @@ "smithy.api#documentation": "

Indicates the algorithm that you want Amazon S3 to use to create the checksum for the object. For more information, see\n Checking object integrity in\n the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-algorithm" } + }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

Indicates the checksum type that you want Amazon S3 to use to calculate the object’s\n checksum value. For more information, see Checking object integrity\n in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-type" + } } }, "traits": { @@ -20894,7 +21003,7 @@ "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject \n operations on this object.

", + "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject \n operations on this object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, @@ -20957,7 +21066,7 @@ "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject operations on\n this object.

\n

\n General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

", + "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of\n this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject operations on\n this object.

\n

\n General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, @@ -22312,7 +22421,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
  • CRC32
  • CRC32C
  • SHA1
  • SHA256
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
  • CRC-32
  • CRC-32C
  • CRC-64NVME
  • SHA-1
  • SHA-256
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 fails the request with a BadDigest error.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.
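To make the BadDigest behavior above concrete, here is a hedged sketch that supplies a precomputed SHA-256 value on a PutObject request (PutObject is used purely as a host operation for illustration; the names are hypothetical and the body initializer varies across Soto versions):

    import Crypto
    import Foundation
    import SotoS3

    // The explicit x-amz-checksum-sha256 value is the Base64 encoding of the
    // raw 256-bit digest. If it disagreed with the declared algorithm or the
    // body, S3 would now reject the request with a BadDigest error.
    func putWithExplicitChecksum(s3: S3) async throws {
        let body = Data("hello, checksums".utf8)
        let sha256 = Data(SHA256.hash(data: body)).base64EncodedString()
        _ = try await s3.putObject(.init(
            body: .init(bytes: body),
            bucket: "my-bucket",
            checksumSHA256: sha256,
            key: "greeting.txt"
        ))
    }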

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } } @@ -23543,7 +23652,7 @@ "TransitionDefaultMinimumObjectSize": { "target": "com.amazonaws.s3#TransitionDefaultMinimumObjectSize", "traits": { - "smithy.api#documentation": "

Indicates which default minimum object size behavior is applied to the lifecycle\n configuration.

\n \n

This parameter applies to general purpose buckets only. It is not supported for\n directory bucket lifecycle configurations.

\n
\n
  • all_storage_classes_128K - Objects smaller than 128 KB will not transition to any storage class by default.
  • varies_by_storage_class - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
\n

To customize the minimum object size for any transition you can add a filter that\n specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in\n the body of your transition rule. Custom filters always take precedence over the default\n transition behavior.

", + "smithy.api#documentation": "

Indicates which default minimum object size behavior is applied to the lifecycle\n configuration.

\n \n

This parameter applies to general purpose buckets only. It isn't supported for\n directory bucket lifecycle configurations.

\n
\n
  • all_storage_classes_128K - Objects smaller than 128 KB will not transition to any storage class by default.
  • varies_by_storage_class - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
\n

To customize the minimum object size for any transition you can add a filter that\n specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in\n the body of your transition rule. Custom filters always take precedence over the default\n transition behavior.
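A hedged sketch of how these knobs might look through the generated Swift shapes, with a rule whose ObjectSizeGreaterThan filter overrides the default minimum (the case and member names, e.g. .variesByStorageClass, are assumed from the model's raw values; the bucket and thresholds are hypothetical):

    import SotoS3

    func applyLifecycle(s3: S3) async throws {
        // Custom 256 KB floor; custom filters take precedence over the default.
        let rule = S3.LifecycleRule(
            filter: .init(objectSizeGreaterThan: 262_144),
            id: "archive-large-objects",
            status: .enabled,
            transitions: [.init(days: 30, storageClass: .glacier)]
        )
        _ = try await s3.putBucketLifecycleConfiguration(.init(
            bucket: "my-bucket",
            lifecycleConfiguration: .init(rules: [rule]),
            transitionDefaultMinimumObjectSize: .variesByStorageClass
        ))
    }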

", "smithy.api#httpHeader": "x-amz-transition-default-minimum-object-size" } } @@ -24573,6 +24682,7 @@ "aws.protocols#httpChecksum": { "requestValidationModeMember": "ChecksumMode", "responseAlgorithms": [ + "CRC64NVME", "CRC32", "CRC32C", "SHA256", @@ -25202,31 +25312,45 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the checksum was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present if the checksum was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

The Base64 encoded, 64-bit CRC-64NVME checksum of the object. For more\n information, see Checking object integrity\n in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-crc64nvme" + } + }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 160-bit SHA-1 digest of the object. This digest is only present if the checksum was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha1" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 256-bit SHA-256 digest of the object. This digest is only present if the checksum was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha256" } }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

The checksum type, which determines how part-level checksums are combined to create an\n object-level checksum for multipart objects. You can use this response header to verify\n that the checksum type that is received is the same checksum type that was specified in the\n CreateMultipartUpload request. For more information, see Checking object integrity in the Amazon S3 User Guide.
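For context, a minimal sketch of reading these response members back through the generated API (the field names mirroring x-amz-checksum-crc64nvme and x-amz-checksum-type are assumed from Soto's usual header mapping; bucket and key are hypothetical):

    import SotoS3

    func inspectChecksums(s3: S3) async throws {
        // checksumMode: .enabled asks S3 to return the stored checksum headers.
        let response = try await s3.getObject(.init(
            bucket: "my-bucket",
            checksumMode: .enabled,
            key: "large-object.bin"
        ))
        print(response.checksumCRC64NVME ?? "no CRC-64NVME checksum")
        // FULL_OBJECT or COMPOSITE, per the documentation above.
        print(response.checksumType.map(\.rawValue) ?? "no checksum type")
    }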

", + "smithy.api#httpHeader": "x-amz-checksum-type" + } + }, "MissingMeta": { "target": "com.amazonaws.s3#MissingMeta", "traits": { @@ -26278,31 +26402,45 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the checksum was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present if the checksum was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

The Base64 encoded, 64-bit CRC-64NVME checksum of the object. For more\n information, see Checking object integrity\n in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-crc64nvme" + } + }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 160-bit SHA-1 digest of the object. This digest is only present if the checksum was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha1" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 256-bit SHA-256 digest of the object. This digest is only present if the checksum was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha256" } }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

The checksum type, which determines how part-level checksums are combined to create an\n object-level checksum for multipart objects. You can use this response header to verify\n that the checksum type that is received is the same checksum type that was specified in the\n CreateMultipartUpload request. For more\n information, see Checking object integrity\n in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-type" + } + }, "ETag": { "target": "com.amazonaws.s3#ETag", "traits": { @@ -29174,6 +29312,12 @@ "traits": { "smithy.api#documentation": "

The algorithm that was used to create a checksum of the object.

" } + }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

The checksum type, which determines how part-level checksums are combined to create an\n object-level checksum for multipart objects. You can use this response header to verify\n that the checksum type that is received is the same checksum type that was specified in the\n CreateMultipartUpload request. For more\n information, see Checking object integrity\n in the Amazon S3 User Guide.

" + } } }, "traits": { @@ -29639,6 +29783,9 @@ "com.amazonaws.s3#MissingMeta": { "type": "integer" }, + "com.amazonaws.s3#MpuObjectSize": { + "type": "string" + }, "com.amazonaws.s3#MultipartUpload": { "type": "structure", "members": { @@ -29683,6 +29830,12 @@ "traits": { "smithy.api#documentation": "

The algorithm that was used to create a checksum of the object.

" } + }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

The checksum type that is used to calculate the object’s\n checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.

" + } } }, "traits": { @@ -29889,6 +30042,12 @@ "smithy.api#xmlFlattened": {} } }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

The checksum type that is used to calculate the object’s\n checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.

" + } + }, "Size": { "target": "com.amazonaws.s3#Size", "traits": { @@ -30269,25 +30428,31 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32 checksum of the part. This checksum is present\n if the multipart upload request was created with the CRC-32 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32C checksum of the part. This checksum is present\n if the multipart upload request was created with the CRC-32C checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.

" + } + }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

The Base64 encoded, 64-bit CRC-64NVME checksum of the part. This checksum is present\n if the multipart upload request was created with the CRC-64NVME checksum algorithm, or if the object was uploaded without a\n checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 160-bit SHA-1 checksum of the part. This checksum is present\n if the multipart upload request was created with the SHA-1 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 256-bit SHA-256 checksum of the part. This checksum is present\n if the multipart upload request was created with the SHA-256 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.
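Part-level checksums like these can be inspected without downloading the object; the hedged sketch below uses GetObjectAttributes (the attribute cases and response members are assumed from Soto's generated API; bucket and key are hypothetical):

    import SotoS3

    func listPartChecksums(s3: S3) async throws {
        let attributes = try await s3.getObjectAttributes(.init(
            bucket: "my-bucket",
            key: "large-object.bin",
            objectAttributes: [.checksum, .objectParts]
        ))
        for part in attributes.objectParts?.parts ?? [] {
            // Only the checksum matching the upload's algorithm is populated.
            print(part.partNumber ?? 0,
                  part.checksumCRC64NVME ?? part.checksumSHA256 ?? "-")
        }
    }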

" } } }, @@ -30391,6 +30556,12 @@ "smithy.api#xmlFlattened": {} } }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

The checksum type that is used to calculate the object’s\n checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.

" + } + }, "Size": { "target": "com.amazonaws.s3#Size", "traits": { @@ -30620,25 +30791,31 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32 checksum of the part. This checksum is present\n if the object was uploaded with the CRC-32 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32C checksum of the part. This checksum is present\n if the object was uploaded with the CRC-32C checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.

" + } + }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

The Base64 encoded, 64-bit CRC-64NVME checksum of the part. This checksum is present\n if the multipart upload request was created with the CRC-64NVME checksum algorithm, or if the object was uploaded without a\n checksum (and Amazon S3 added the default checksum, CRC-64NVME, to the uploaded object). For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 160-bit SHA-1 checksum of the part. This checksum is present\n if the object was uploaded with the SHA-1 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

" + "smithy.api#documentation": "

The Base64 encoded, 256-bit SHA-256 checksum of the part. This checksum is present\n if the object was uploaded with the SHA-256 checksum algorithm. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } } }, @@ -31005,7 +31182,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.\n

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", + "smithy.api#documentation": "

The Base64 encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.\n

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
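If you construct the request by hand rather than through the SDK, note that Content-MD5 is the Base64 encoding of the raw 128-bit digest, not of its hex form. A minimal sketch using swift-crypto (which soto-core already depends on):

    import Crypto
    import Foundation

    // MD5 is cryptographically broken, so swift-crypto keeps it under
    // `Insecure`; here it serves only as a transport integrity check.
    func contentMD5(for body: Data) -> String {
        Data(Insecure.MD5.hash(data: body)).base64EncodedString()
    }

    // contentMD5(for: Data("hello".utf8)) == "XUFAKrxLKna5cZ2REBfFkg=="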

", "smithy.api#httpHeader": "Content-MD5" } }, @@ -31223,7 +31400,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.\n

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", + "smithy.api#documentation": "

The Base64 encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.\n

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", "smithy.api#httpHeader": "Content-MD5" } }, @@ -31289,7 +31466,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the server-side encryption\n configuration.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

The Base64 encoded 128-bit MD5 digest of the server-side encryption\n configuration.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -31934,7 +32111,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
  • CRC32
  • CRC32C
  • SHA1
  • SHA256
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n \n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

\n
", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
  • CRC-32
  • CRC-32C
  • CRC-64NVME
  • SHA-1
  • SHA-256
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 fails the request with a BadDigest error.

\n \n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

\n
", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -32030,7 +32207,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", + "smithy.api#documentation": "

The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", "smithy.api#httpHeader": "Content-MD5" } }, @@ -32123,7 +32300,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", + "smithy.api#documentation": "

The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", "smithy.api#httpHeader": "Content-MD5" } }, @@ -32219,7 +32396,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", + "smithy.api#documentation": "

The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", "smithy.api#httpHeader": "Content-MD5" } }, @@ -32264,7 +32441,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "\n

This operation is not supported for directory buckets.

\n
\n \n

When you enable versioning on a bucket for the first time, it might take a short\n amount of time for the change to be fully propagated. While this change is propagating,\n you may encounter intermittent HTTP 404 NoSuchKey errors for requests to\n objects created or updated after enabling versioning. We recommend that you wait for 15\n minutes after enabling versioning before issuing write operations (PUT or\n DELETE) on objects in the bucket.

\n
\n

Sets the versioning state of an existing bucket.

\n

You can set the versioning state with one of the following values:

\n

\n Enabled—Enables versioning for the objects in the\n bucket. All objects added to the bucket receive a unique version ID.

\n

\n Suspended—Disables versioning for the objects in the\n bucket. All objects added to the bucket receive the version ID null.

\n

If the versioning state has never been set on a bucket, it has no versioning state; a\n GetBucketVersioning request does not return a versioning state value.

\n

In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner\n and want to enable MFA Delete in the bucket versioning configuration, you must include the\n x-amz-mfa request header and the Status and the\n MfaDelete request elements in a request to set the versioning state of the\n bucket.

\n \n

If you have an object expiration lifecycle configuration in your non-versioned bucket\n and you want to maintain the same permanent delete behavior when you enable versioning,\n you must add a noncurrent expiration policy. The noncurrent expiration lifecycle\n configuration will manage the deletes of the noncurrent object versions in the\n version-enabled bucket. (A version-enabled bucket maintains one current and zero or more\n noncurrent object versions.) For more information, see Lifecycle and Versioning.

\n
\n

The following operations are related to PutBucketVersioning:

\n ", + "smithy.api#documentation": "\n

This operation is not supported for directory buckets.

\n
\n \n

When you enable versioning on a bucket for the first time, it might take a short\n amount of time for the change to be fully propagated. While this change is propagating,\n you might encounter intermittent HTTP 404 NoSuchKey errors for requests to\n objects created or updated after enabling versioning. We recommend that you wait for 15\n minutes after enabling versioning before issuing write operations (PUT or\n DELETE) on objects in the bucket.

\n
\n

Sets the versioning state of an existing bucket.

\n

You can set the versioning state with one of the following values:

\n

\n Enabled—Enables versioning for the objects in the\n bucket. All objects added to the bucket receive a unique version ID.

\n

\n Suspended—Disables versioning for the objects in the\n bucket. All objects added to the bucket receive the version ID null.

\n

If the versioning state has never been set on a bucket, it has no versioning state; a\n GetBucketVersioning request does not return a versioning state value.

\n

In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner\n and want to enable MFA Delete in the bucket versioning configuration, you must include the\n x-amz-mfa request header and the Status and the\n MfaDelete request elements in a request to set the versioning state of the\n bucket.

\n \n

If you have an object expiration lifecycle configuration in your non-versioned bucket\n and you want to maintain the same permanent delete behavior when you enable versioning,\n you must add a noncurrent expiration policy. The noncurrent expiration lifecycle\n configuration will manage the deletes of the noncurrent object versions in the\n version-enabled bucket. (A version-enabled bucket maintains one current and zero or more\n noncurrent object versions.) For more information, see Lifecycle and Versioning.

\n
\n

The following operations are related to PutBucketVersioning:
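A hedged sketch of enabling versioning (and, optionally, MFA Delete) through the generated API; the bucket name and MFA device values are hypothetical, and the x-amz-mfa value concatenates the device serial, a space, and the current token, per the S3 documentation:

    import SotoS3

    func enableVersioning(s3: S3) async throws {
        _ = try await s3.putBucketVersioning(.init(
            bucket: "my-bucket",
            mfa: "arn:aws:iam::123456789012:mfa/my-device 123456",
            versioningConfiguration: .init(mfaDelete: .enabled, status: .enabled)
        ))
        // Per the note above, allow ~15 minutes before relying on the new
        // versioning state for writes.
    }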

\n ", "smithy.api#examples": [ { "title": "Set versioning configuration on a bucket", @@ -32307,7 +32484,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

>The base64-encoded 128-bit MD5 digest of the data. You must use this header as a\n message integrity check to verify that the request body was not corrupted in transit. For\n more information, see RFC\n 1864.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", + "smithy.api#documentation": "

The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a\n message integrity check to verify that the request body was not corrupted in transit. For\n more information, see RFC\n 1864.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", "smithy.api#httpHeader": "Content-MD5" } }, @@ -32407,7 +32584,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", + "smithy.api#documentation": "

The Base64 encoded 128-bit MD5 digest of the data. You must use this header as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, see RFC 1864.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", "smithy.api#httpHeader": "Content-MD5" } }, @@ -32664,7 +32841,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.>\n

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", + "smithy.api#documentation": "

The Base64 encoded 128-bit MD5 digest of the data. This header must be used as a message\n integrity check to verify that the request body was not corrupted in transit. For more\n information, go to RFC\n 1864.\n

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", "smithy.api#httpHeader": "Content-MD5" } }, @@ -32966,31 +33143,45 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the checksum was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present if the checksum was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

The Base64 encoded, 64-bit CRC-64NVME checksum of the object. This header\n is present if the object was uploaded with the CRC-64NVME checksum algorithm, or if it\n was uploaded without a checksum (and Amazon S3 added the default checksum,\n CRC-64NVME, to the uploaded object). For more information about how\n checksums are calculated with multipart uploads, see Checking object integrity\n in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-crc64nvme" + } + }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 160-bit SHA-1 digest of the object. This digest is only present if the checksum was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha1" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 256-bit SHA-256 digest of the object. This digest is only present if the checksum was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha256" } }, + "ChecksumType": { + "target": "com.amazonaws.s3#ChecksumType", + "traits": { + "smithy.api#documentation": "

This header specifies the checksum type of the object, which determines how part-level\n checksums are combined to create an object-level checksum for multipart objects. For\n PutObject uploads, the checksum type is always FULL_OBJECT. You can use this header as a\n data integrity check to verify that the checksum type that is received is the same checksum type\n that was specified. For more information, see Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-type" + } + }, "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { @@ -33029,7 +33220,7 @@ "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject \n operations on this object.

", + "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject \n operations on this object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, @@ -33043,7 +33234,7 @@ "Size": { "target": "com.amazonaws.s3#Size", "traits": { - "smithy.api#documentation": "

\n The size of the object in bytes. This will only be present if you append to an object.\n

\n \n

This functionality is only supported for objects in the Amazon S3 Express One Zone storage class in directory buckets.

\n
", + "smithy.api#documentation": "

\n The size of the object in bytes. This value is only present if you append to an object.\n

\n \n

This functionality is only supported for objects in the Amazon S3 Express One Zone storage class in directory buckets.

\n
", "smithy.api#httpHeader": "x-amz-object-size" } }, @@ -33125,7 +33316,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to\n RFC 1864. This header can be used as a message integrity check to verify that the data is\n the same data that was originally sent. Although it is optional, we recommend using the\n Content-MD5 mechanism as an end-to-end integrity check. For more information about REST\n request authentication, see REST Authentication.

\n \n

The Content-MD5 or x-amz-sdk-checksum-algorithm header is\n required for any request to upload an object with a retention period configured using\n Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the\n Amazon S3 User Guide.

\n
\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

The Base64 encoded 128-bit MD5 digest of the message (without the headers) according to\n RFC 1864. This header can be used as a message integrity check to verify that the data is\n the same data that was originally sent. Although it is optional, we recommend using the\n Content-MD5 mechanism as an end-to-end integrity check. For more information about REST\n request authentication, see REST Authentication.

\n \n

The Content-MD5 or x-amz-sdk-checksum-algorithm header is\n required for any request to upload an object with a retention period configured using\n Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the\n Amazon S3 User Guide.

\n
\n \n

This functionality is not supported for directory buckets.
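Since either Content-MD5 or x-amz-sdk-checksum-algorithm must accompany an upload that sets a retention period, here is a hedged sketch that satisfies the requirement with a checksum algorithm on a general purpose bucket (the names and dates are hypothetical, and the body initializer varies by Soto version):

    import Foundation
    import SotoS3

    func putLockedObject(s3: S3) async throws {
        _ = try await s3.putObject(.init(
            body: .init(bytes: Array("audit record".utf8)),
            bucket: "my-locked-bucket",
            checksumAlgorithm: .sha256,   // satisfies the integrity requirement
            key: "audit/2025-01-01.log",
            objectLockMode: .compliance,
            objectLockRetainUntilDate: Date().addingTimeInterval(365 * 86_400)
        ))
    }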

\n
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -33139,35 +33330,42 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
  • CRC32
  • CRC32C
  • SHA1
  • SHA256
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n \n

The Content-MD5 or x-amz-sdk-checksum-algorithm header is\n required for any request to upload an object with a retention period configured using\n Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the\n Amazon S3 User Guide.

\n
\n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
  • CRC-32
  • CRC-32C
  • CRC-64NVME
  • SHA-1
  • SHA-256
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 fails the request with a BadDigest error.

\n \n

The Content-MD5 or x-amz-sdk-checksum-algorithm header is\n required for any request to upload an object with a retention period configured using\n Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the\n Amazon S3 User Guide.

\n
\n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 32-bit CRC-32C checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This header specifies the Base64 encoded, 64-bit\n CRC-64NVME checksum of the object. The CRC-64NVME checksum is\n always a full object checksum. For more information, see Checking object integrity\n in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-crc64nvme" + } + }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha1" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.
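
For illustration, computing such a value in Swift with swift-crypto (an assumption; any SHA-256 implementation works) is a one-liner over the object bytes:

import Crypto
import Foundation

// The value S3 expects in x-amz-checksum-sha256: the Base64 encoding of the
// 256-bit SHA-256 digest of the object bytes. The payload is a placeholder.
let objectBytes = Data("hello, S3".utf8)
let checksumSHA256 = Data(SHA256.hash(data: objectBytes)).base64EncodedString()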

", "smithy.api#httpHeader": "x-amz-checksum-sha256" } }, @@ -33297,7 +33495,7 @@ "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject operations on\n this object.

\n

\n General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

", + "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of\n this header is a Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject operations on\n this object.

\n

\n General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.
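
A short Foundation-only sketch of producing such a header value; the context keys and values are illustrative only:

import Foundation

// The header value is a Base64 encoded string of UTF-8 JSON key-value pairs.
let encryptionContext = ["department": "finance"]   // hypothetical context
let json = try JSONSerialization.data(withJSONObject: encryptionContext)
let headerValue = json.base64EncodedString()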

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, @@ -35925,28 +36123,35 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is only present if the checksum was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This header specifies the Base64 encoded, 64-bit\n CRC-64NVME checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-crc64nvme" + } + }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded\n with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 160-bit SHA-1 digest of the object. This digest is only present if the checksum was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha1" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The Base64 encoded, 256-bit SHA-256 digest of the object. This digest is only present if the checksum was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
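
To make the part-based calculation concrete, here is a hedged Swift sketch of the checksum-of-checksums scheme described above, under the assumption (consistent with Checking object integrity) that the raw part digests are concatenated, hashed again, Base64 encoded, and suffixed with the part count:

import Crypto
import Foundation

// Composite multipart checksum sketch: hash each part, hash the
// concatenation of the raw part digests, Base64 encode, append "-<parts>".
let parts = [Data("part-1".utf8), Data("part-2".utf8)]
let partDigests = parts.map { Data(SHA256.hash(data: $0)) }
let combined = partDigests.reduce(Data(), +)
let composite = Data(SHA256.hash(data: combined)).base64EncodedString() + "-\(parts.count)"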

", "smithy.api#httpHeader": "x-amz-checksum-sha256" } }, @@ -36021,7 +36226,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated\n when using the command from the CLI. This parameter is required if object lock parameters\n are specified.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

The Base64 encoded 128-bit MD5 digest of the part data. This parameter is auto-populated\n when using the command from the CLI. This parameter is required if object lock parameters\n are specified.

\n \n

This functionality is not supported for directory buckets.
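
For reference, a Base64 encoded MD5 digest of part data can be produced with swift-crypto (an assumption; the CLI computes this automatically):

import Crypto
import Foundation

// Base64 encoded 128-bit MD5 digest of the part data, as sent in Content-MD5.
// Insecure.MD5 is appropriate here: this is an integrity check, not a
// security control.
let partData = Data("example part".utf8)
let contentMD5 = Data(Insecure.MD5.hash(data: partData)).base64EncodedString()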

\n
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -36035,28 +36240,35 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 32-bit CRC-32C checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This header specifies the Base64 encoded, 64-bit\n CRC-64NVME checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-checksum-crc64nvme" + } + }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha1" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-sha256" } }, @@ -36337,28 +36549,35 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32\n checksum of the object returned by the Object Lambda function. This may not match the\n checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values\n only when the original GetObject request required checksum validation. For\n more information about checksums, see Checking object\n integrity in the Amazon S3 User Guide.

\n

Only one checksum header can be specified at a time. If you supply multiple checksum\n headers, this request will fail.

\n

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This specifies the Base64 encoded, 32-bit CRC-32\n checksum of the object returned by the Object Lambda function. This may not match the\n checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values\n only when the original GetObject request required checksum validation. For\n more information about checksums, see Checking object\n integrity in the Amazon S3 User Guide.

\n

Only one checksum header can be specified at a time. If you supply multiple checksum\n headers, this request will fail.

\n

", "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32C\n checksum of the object returned by the Object Lambda function. This may not match the\n checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values\n only when the original GetObject request required checksum validation. For\n more information about checksums, see Checking object\n integrity in the Amazon S3 User Guide.

\n

Only one checksum header can be specified at a time. If you supply multiple checksum\n headers, this request will fail.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This specifies the Base64 encoded, 32-bit CRC-32C\n checksum of the object returned by the Object Lambda function. This may not match the\n checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values\n only when the original GetObject request required checksum validation. For\n more information about checksums, see Checking object\n integrity in the Amazon S3 User Guide.

\n

Only one checksum header can be specified at a time. If you supply multiple checksum\n headers, this request will fail.

", "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-checksum-crc32c" } }, + "ChecksumCRC64NVME": { + "target": "com.amazonaws.s3#ChecksumCRC64NVME", + "traits": { + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This header specifies the Base64 encoded, 64-bit\n CRC-64NVME checksum of the part. For more information, see Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-checksum-crc64nvme" + } + }, "ChecksumSHA1": { "target": "com.amazonaws.s3#ChecksumSHA1", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This specifies the base64-encoded, 160-bit SHA-1\n digest of the object returned by the Object Lambda function. This may not match the\n checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values\n only when the original GetObject request required checksum validation. For\n more information about checksums, see Checking object\n integrity in the Amazon S3 User Guide.

\n

Only one checksum header can be specified at a time. If you supply multiple checksum\n headers, this request will fail.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This specifies the Base64 encoded, 160-bit SHA-1\n digest of the object returned by the Object Lambda function. This may not match the\n checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values\n only when the original GetObject request required checksum validation. For\n more information about checksums, see Checking object\n integrity in the Amazon S3 User Guide.

\n

Only one checksum header can be specified at a time. If you supply multiple checksum\n headers, this request will fail.

", "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-checksum-sha1" } }, "ChecksumSHA256": { "target": "com.amazonaws.s3#ChecksumSHA256", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This specifies the base64-encoded, 256-bit SHA-256\n digest of the object returned by the Object Lambda function. This may not match the\n checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values\n only when the original GetObject request required checksum validation. For\n more information about checksums, see Checking object\n integrity in the Amazon S3 User Guide.

\n

Only one checksum header can be specified at a time. If you supply multiple checksum\n headers, this request will fail.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is\n the same data that was originally sent. This specifies the Base64 encoded, 256-bit SHA-256\n digest of the object returned by the Object Lambda function. This may not match the\n checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values\n only when the original GetObject request required checksum validation. For\n more information about checksums, see Checking object\n integrity in the Amazon S3 User Guide.

\n

Only one checksum header can be specified at a time. If you supply multiple checksum\n headers, this request will fail.
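
A hedged Soto sketch of an Object Lambda handler forwarding a single checksum header with its transformed object; the initializer shape is assumed from Soto's generated API, and requestRoute, requestToken, and the digest are placeholders that would normally come from the Object Lambda event:

import NIOCore
import SotoS3

// Forward exactly one checksum header with the transformed object.
let s3 = S3(client: AWSClient(), region: .useast1)
let writeRequest = S3.WriteGetObjectResponseRequest(
    body: .init(buffer: ByteBuffer(string: "transformed body")),
    checksumSHA256: "PLACEHOLDER-BASE64-DIGEST=",  // one checksum header only
    requestRoute: "io-use1.example-route",         // from the event (hypothetical)
    requestToken: "example-token"                  // from the event (hypothetical)
)
_ = try await s3.writeGetObjectResponse(writeRequest)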

", "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-checksum-sha256" } }, diff --git a/models/sagemaker.json b/models/sagemaker.json index 81d8031897..e2beec9f86 100644 --- a/models/sagemaker.json +++ b/models/sagemaker.json @@ -471,6 +471,12 @@ "traits": { "smithy.api#documentation": "

The type of compression used for an additional data source used in inference or\n training. Specify None if your additional data source is not\n compressed.

" } + }, + "ETag": { + "target": "com.amazonaws.sagemaker#String", + "traits": { + "smithy.api#documentation": "

The ETag associated with the S3 URI.

" + } } }, "traits": { @@ -701,7 +707,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the training algorithm to use in a CreateTrainingJob request.

\n

For more information about algorithms provided by SageMaker, see Algorithms. For\n information about using your own algorithms, see Using Your Own Algorithms with\n Amazon SageMaker.

" + "smithy.api#documentation": "

Specifies the training algorithm to use in a CreateTrainingJob request.

\n \n

SageMaker uses its own account credentials to pull and access built-in algorithms, so built-in algorithms are universally accessible across all Amazon Web Services accounts. As a result, built-in algorithms have standard, unrestricted access. You cannot restrict built-in algorithms using IAM roles. Use custom algorithms if you require specific access controls.

\n
\n

For more information about algorithms provided by SageMaker, see Algorithms. For\n information about using your own algorithms, see Using Your Own Algorithms with\n Amazon SageMaker.

" } }, "com.amazonaws.sagemaker#AlgorithmStatus": { @@ -1009,7 +1015,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details about an Amazon SageMaker app.

" + "smithy.api#documentation": "

Details about an Amazon SageMaker AI app.

" } }, "com.amazonaws.sagemaker#AppImageConfigArn": { @@ -1052,7 +1058,7 @@ "KernelGatewayImageConfig": { "target": "com.amazonaws.sagemaker#KernelGatewayImageConfig", "traits": { - "smithy.api#documentation": "

The configuration for the file system and kernels in the SageMaker image.

" + "smithy.api#documentation": "

The configuration for the file system and kernels in the SageMaker AI image.

" } }, "JupyterLabAppImageConfig": { @@ -1069,7 +1075,7 @@ } }, "traits": { - "smithy.api#documentation": "

The configuration for running a SageMaker image as a KernelGateway app.

" + "smithy.api#documentation": "

The configuration for running a SageMaker AI image as a KernelGateway app.

" } }, "com.amazonaws.sagemaker#AppImageConfigList": { @@ -4448,7 +4454,7 @@ "target": "com.amazonaws.sagemaker#AutoMLS3DataType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The data type.

  • If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training.

    The S3Prefix should have the following format:

    s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE

  • If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training.

    A ManifestFile should have the format shown below:

    [ {"prefix": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/"},
    "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1",
    "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2",
    ... "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N" ]

  • If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile is available for V2 API jobs only (for example, for jobs created by calling CreateAutoMLJobV2).

    Here is a minimal, single-record example of an AugmentedManifestFile:

    {"source-ref": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/cats/cat.jpg", "label-metadata": {"class-name": "cat"}}

    For more information on AugmentedManifestFile, see Provide Dataset Metadata to Training Jobs with an Augmented Manifest File.
", + "smithy.api#documentation": "

The data type.

  • If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker AI uses all objects that match the specified key name prefix for model training (a Swift sketch of this case follows the list).

    The S3Prefix should have the following format:

    s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE

  • If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker AI to use for model training.

    A ManifestFile should have the format shown below:

    [ {"prefix": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/"},
    "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1",
    "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2",
    ... "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N" ]

  • If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile is available for V2 API jobs only (for example, for jobs created by calling CreateAutoMLJobV2).

    Here is a minimal, single-record example of an AugmentedManifestFile:

    {"source-ref": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/cats/cat.jpg", "label-metadata": {"class-name": "cat"}}

    For more information on AugmentedManifestFile, see Provide Dataset Metadata to Training Jobs with an Augmented Manifest File.
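
The Swift sketch referenced above, using Soto's generated SageMaker API; the initializer shape is an assumption and the URI is a placeholder:

import SotoSageMaker

// Point an AutoML job at every object under a key name prefix.
let dataSource = SageMaker.AutoMLS3DataSource(
    s3DataType: .s3Prefix,
    s3Uri: "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE"
)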
", "smithy.api#required": {} } }, @@ -5601,18 +5607,18 @@ "CsvContentTypes": { "target": "com.amazonaws.sagemaker#CsvContentTypes", "traits": { - "smithy.api#documentation": "

The list of all content type headers that Amazon SageMaker will treat as CSV and\n capture accordingly.

" + "smithy.api#documentation": "

The list of all content type headers that Amazon SageMaker AI will treat as CSV and\n capture accordingly.

" } }, "JsonContentTypes": { "target": "com.amazonaws.sagemaker#JsonContentTypes", "traits": { - "smithy.api#documentation": "

The list of all content type headers that SageMaker will treat as JSON and\n capture accordingly.

" + "smithy.api#documentation": "

The list of all content type headers that SageMaker AI will treat as JSON and\n capture accordingly.

" } } }, "traits": { - "smithy.api#documentation": "

Configuration specifying how to treat different headers. If no headers are specified\n Amazon SageMaker will by default base64 encode when capturing the data.

" + "smithy.api#documentation": "

Configuration specifying how to treat different headers. If no headers are specified, Amazon SageMaker AI base64 encodes the captured data by default.
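
A hedged Soto sketch of opting specific content types out of base64 encoding during capture (member names assumed from the model above; the content types are illustrative):

import SotoSageMaker

// Record CSV and JSON payloads as text during data capture.
let contentTypes = SageMaker.CaptureContentTypeHeader(
    csvContentTypes: ["text/csv"],
    jsonContentTypes: ["application/json"]
)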

" } }, "com.amazonaws.sagemaker#CaptureMode": { @@ -6873,7 +6879,8 @@ "type": "integer", "traits": { "smithy.api#range": { - "min": 0 + "min": 0, + "max": 6758 } } }, @@ -7528,6 +7535,168 @@ "traits": { "smithy.api#enumValue": "ml.trn2.48xlarge" } + }, + "ML_C6I_LARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.large" + } + }, + "ML_C6I_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.xlarge" + } + }, + "ML_C6I_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.2xlarge" + } + }, + "ML_C6I_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.4xlarge" + } + }, + "ML_C6I_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.8xlarge" + } + }, + "ML_C6I_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.12xlarge" + } + }, + "ML_C6I_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.16xlarge" + } + }, + "ML_C6I_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.24xlarge" + } + }, + "ML_C6I_32XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.32xlarge" + } + }, + "ML_M6I_LARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.large" + } + }, + "ML_M6I_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.xlarge" + } + }, + "ML_M6I_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.2xlarge" + } + }, + "ML_M6I_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.4xlarge" + } + }, + "ML_M6I_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.8xlarge" + } + }, + "ML_M6I_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.12xlarge" + } + }, + "ML_M6I_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.16xlarge" + } + }, + "ML_M6I_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.24xlarge" + } + }, + "ML_M6I_32XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.32xlarge" + } + }, + "ML_R6I_LARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.large" + } + }, + "ML_R6I_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.xlarge" + } + }, + "ML_R6I_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.2xlarge" + } + }, + "ML_R6I_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.4xlarge" + } + }, + "ML_R6I_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.8xlarge" + } + }, + "ML_R6I_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.12xlarge" + } + }, + "ML_R6I_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.16xlarge" + } + }, + "ML_R6I_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.24xlarge" + } + }, + "ML_R6I_32XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.32xlarge" + } } } }, @@ -7645,6 +7814,12 @@ "smithy.api#documentation": "

The private primary IP address of the SageMaker HyperPod cluster node.

" } }, + "PrivatePrimaryIpv6": { + "target": "com.amazonaws.sagemaker#ClusterPrivatePrimaryIpv6", + "traits": { + "smithy.api#documentation": "

The private primary IPv6 address of the SageMaker HyperPod cluster node.

" + } + }, "PrivateDnsHostname": { "target": "com.amazonaws.sagemaker#ClusterPrivateDnsHostname", "traits": { @@ -7807,6 +7982,9 @@ "smithy.api#pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$" } }, + "com.amazonaws.sagemaker#ClusterPrivatePrimaryIpv6": { + "type": "string" + }, "com.amazonaws.sagemaker#ClusterSchedulerConfigArn": { "type": "string", "traits": { @@ -8110,7 +8288,7 @@ } }, "traits": { - "smithy.api#documentation": "

A Git repository that SageMaker automatically displays to users for cloning in the\n JupyterServer application.

" + "smithy.api#documentation": "

A Git repository that SageMaker AI automatically displays to users for cloning in the\n JupyterServer application.

" } }, "com.amazonaws.sagemaker#CodeRepositoryArn": { @@ -9523,7 +9701,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a running app for the specified UserProfile. This operation is automatically\n invoked by Amazon SageMaker upon access to the associated Domain, and when new kernel\n configurations are selected by the user. A user may have multiple Apps active\n simultaneously.

" + "smithy.api#documentation": "

Creates a running app for the specified UserProfile. This operation is automatically\n invoked by Amazon SageMaker AI upon access to the associated Domain, and when new kernel\n configurations are selected by the user. A user may have multiple Apps active\n simultaneously.

" } }, "com.amazonaws.sagemaker#CreateAppImageConfig": { @@ -9540,7 +9718,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a configuration for running a SageMaker image as a KernelGateway app. The\n configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the\n kernels in the image.

" + "smithy.api#documentation": "

Creates a configuration for running a SageMaker AI image as a KernelGateway app. The\n configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the\n kernels in the image.

" } }, "com.amazonaws.sagemaker#CreateAppImageConfigRequest": { @@ -9645,7 +9823,7 @@ "ResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker image\n created on the instance.

\n \n

The value of InstanceType passed as part of the ResourceSpec\n in the CreateApp call overrides the value passed as part of the\n ResourceSpec configured for the user profile or the domain. If\n InstanceType is not specified in any of those three ResourceSpec\n values for a KernelGateway app, the CreateApp call fails with a\n request validation error.

\n
" + "smithy.api#documentation": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker AI image\n created on the instance.

\n \n

The value of InstanceType passed as part of the ResourceSpec\n in the CreateApp call overrides the value passed as part of the\n ResourceSpec configured for the user profile or the domain. If\n InstanceType is not specified in any of those three ResourceSpec\n values for a KernelGateway app, the CreateApp call fails with a\n request validation error.

\n
" } } }, @@ -9760,7 +9938,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

\n

An AutoML job in SageMaker is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.

\n

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker developer guide.

\n \n

We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

" + "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

\n

An AutoML job in SageMaker AI is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker AI then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.

\n

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker AI developer guide.

\n \n

We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

" } }, "com.amazonaws.sagemaker#CreateAutoMLJobRequest": { @@ -9872,7 +10050,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

\n

An AutoML job in SageMaker is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.

\n

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker developer guide.

\n

AutoML jobs V2 support various problem types such as regression, binary, and multiclass\n classification with tabular data, text and image classification, time-series forecasting,\n and fine-tuning of large language models (LLMs) for text generation.

\n \n

\n CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob\n and DescribeAutoMLJob which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

For the list of available problem types supported by CreateAutoMLJobV2, see\n AutoMLProblemTypeConfig.

\n

You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

" + "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

\n

An AutoML job in SageMaker AI is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker AI then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.

\n

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker AI developer guide.

\n

AutoML jobs V2 support various problem types such as regression, binary, and multiclass\n classification with tabular data, text and image classification, time-series forecasting,\n and fine-tuning of large language models (LLMs) for text generation.

\n \n

\n CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob\n and DescribeAutoMLJob which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

For the list of available problem types supported by CreateAutoMLJobV2, see\n AutoMLProblemTypeConfig.

\n

You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

" } }, "com.amazonaws.sagemaker#CreateAutoMLJobV2Request": { @@ -10153,7 +10331,7 @@ "target": "com.amazonaws.sagemaker#CreateCodeRepositoryOutput" }, "traits": { - "smithy.api#documentation": "

Creates a Git repository as a resource in your SageMaker account. You can\n associate the repository with notebook instances so that you can use Git source control\n for the notebooks you create. The Git repository is a resource in your SageMaker\n account, so it can be associated with more than one notebook instance, and it persists\n independently from the lifecycle of any notebook instances it is associated with.

\n

The repository can be hosted either in Amazon Web Services CodeCommit\n or in any other Git repository.

" + "smithy.api#documentation": "

Creates a Git repository as a resource in your SageMaker AI account. You can\n associate the repository with notebook instances so that you can use Git source control\n for the notebooks you create. The Git repository is a resource in your SageMaker AI\n account, so it can be associated with more than one notebook instance, and it persists\n independently from the lifecycle of any notebook instances it is associated with.

\n

The repository can be hosted either in Amazon Web Services CodeCommit\n or in any other Git repository.

" } }, "com.amazonaws.sagemaker#CreateCodeRepositoryInput": { @@ -10219,7 +10397,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the\n resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

\n

If\n you choose to host your model using Amazon SageMaker hosting services, you can use the resulting\n model artifacts as part of the model. You can also use the artifacts with\n Amazon Web Services IoT Greengrass. In that case, deploy them as an ML\n resource.

\n

In the request body, you provide the following:

  • A name for the compilation job

  • Information about the input model artifacts

  • The output location for the compiled model and the device (target) that the model runs on

  • The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation job.

You can also provide a Tag to track the model compilation job's resource\n use and costs. The response body contains the\n CompilationJobArn\n for the compiled job.

\n

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation\n job, use DescribeCompilationJob. To get information about multiple model compilation\n jobs, use ListCompilationJobs.

" + "smithy.api#documentation": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker AI saves the\n resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

\n

If\n you choose to host your model using Amazon SageMaker AI hosting services, you can use the resulting\n model artifacts as part of the model. You can also use the artifacts with\n Amazon Web Services IoT Greengrass. In that case, deploy them as an ML\n resource.

\n

In the request body, you provide the following:

  • A name for the compilation job

  • Information about the input model artifacts

  • The output location for the compiled model and the device (target) that the model runs on

  • The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker AI assumes to perform the model compilation job.

You can also provide a Tag to track the model compilation job's resource\n use and costs. The response body contains the\n CompilationJobArn\n for the compiled job.

\n

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation\n job, use DescribeCompilationJob. To get information about multiple model compilation\n jobs, use ListCompilationJobs.
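
A hedged Soto sketch of the request body outlined above; the initializer shape and enum cases are assumptions based on Soto's generated API, and every value is a placeholder:

import SotoSageMaker

// Name, input artifacts, output location and target, role, and a runtime cap.
let compileRequest = SageMaker.CreateCompilationJobRequest(
    compilationJobName: "example-compilation-job",
    inputConfig: .init(
        dataInputConfig: "{\"data\": [1, 3, 224, 224]}",   // framework-specific input shape
        framework: .pytorch,
        s3Uri: "s3://DOC-EXAMPLE-BUCKET/model.tar.gz"
    ),
    outputConfig: .init(
        s3OutputLocation: "s3://DOC-EXAMPLE-BUCKET/compiled/",
        targetDevice: .mlC5
    ),
    roleArn: "arn:aws:iam::123456789012:role/SageMakerRole",
    stoppingCondition: .init(maxRuntimeInSeconds: 900)
)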

" } }, "com.amazonaws.sagemaker#CreateCompilationJobRequest": { @@ -10237,7 +10415,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on\n your behalf.

\n

During model compilation, Amazon SageMaker needs your permission to:

  • Read input data from an S3 bucket

  • Write model artifacts to an S3 bucket

  • Write logs to Amazon CloudWatch Logs

  • Publish metrics to Amazon CloudWatch

You grant permissions for all of these tasks to an IAM role. To pass this role to\n Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For\n more information, see Amazon SageMaker\n Roles.\n

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on\n your behalf.

\n

During model compilation, Amazon SageMaker AI needs your permission to:

  • Read input data from an S3 bucket

  • Write model artifacts to an S3 bucket

  • Write logs to Amazon CloudWatch Logs

  • Publish metrics to Amazon CloudWatch

You grant permissions for all of these tasks to an IAM role. To pass this role to\n Amazon SageMaker AI, the caller of this API must have the iam:PassRole permission. For\n more information, see Amazon SageMaker AI\n Roles.\n

", "smithy.api#required": {} } }, @@ -10271,7 +10449,7 @@ "target": "com.amazonaws.sagemaker#StoppingCondition", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Specifies a limit to how long a model compilation job can run. When the job reaches\n the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training\n costs.

", + "smithy.api#documentation": "

Specifies a limit to how long a model compilation job can run. When the job reaches\n the time limit, Amazon SageMaker AI ends the compilation job. Use this API to cap model training\n costs.

", "smithy.api#required": {} } }, @@ -10293,7 +10471,7 @@ "target": "com.amazonaws.sagemaker#CompilationJobArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

If the action is successful, the service sends back an HTTP 200 response. Amazon SageMaker returns\n the following data in JSON format:

  • CompilationJobArn: The Amazon Resource Name (ARN) of the compiled job.
", + "smithy.api#documentation": "

If the action is successful, the service sends back an HTTP 200 response. Amazon SageMaker AI returns\n the following data in JSON format:

  • CompilationJobArn: The Amazon Resource Name (ARN) of the compiled job.
", "smithy.api#required": {} } } @@ -10502,7 +10680,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a definition for a job that monitors data quality and drift. For information\n about model monitor, see Amazon SageMaker Model\n Monitor.

" + "smithy.api#documentation": "

Creates a definition for a job that monitors data quality and drift. For information\n about model monitor, see Amazon SageMaker AI Model\n Monitor.

" } }, "com.amazonaws.sagemaker#CreateDataQualityJobDefinitionRequest": { @@ -10562,7 +10740,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can \n assume to perform tasks on your behalf.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can \n assume to perform tasks on your behalf.

", "smithy.api#required": {} } }, @@ -10681,7 +10859,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a Domain. A domain consists of an associated Amazon Elastic File System\n volume, a list of authorized users, and a variety of security, application, policy, and\n Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files\n and other artifacts with each other.

\n

\n EFS storage\n

\n

When a domain is created, an EFS volume is created for use by all of the users within the\n domain. Each user receives a private home directory within the EFS volume for notebooks, Git\n repositories, and data files.

\n

SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services\n KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key\n by default. For more control, you can specify a customer managed key. For more information,\n see Protect Data\n at Rest Using Encryption.

\n

\n VPC configuration\n

\n

All traffic between the domain and the Amazon EFS volume is through the specified\n VPC and subnets. For other traffic, you can specify the AppNetworkAccessType\n parameter. AppNetworkAccessType corresponds to the network access type that you\n choose when you onboard to the domain. The following options are available:

  • PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This is the default value.

  • VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway.

    When internet access is disabled, you won't be able to run an Amazon SageMaker Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your security groups allow outbound connections.
\n \n

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch an Amazon SageMaker Studio app successfully.

\n
\n

For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC.

" + "smithy.api#documentation": "

Creates a Domain. A domain consists of an associated Amazon Elastic File System\n volume, a list of authorized users, and a variety of security, application, policy, and\n Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files\n and other artifacts with each other.

\n

\n EFS storage\n

\n

When a domain is created, an EFS volume is created for use by all of the users within the\n domain. Each user receives a private home directory within the EFS volume for notebooks, Git\n repositories, and data files.

\n

SageMaker AI uses the Amazon Web Services Key Management Service (Amazon Web Services\n KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key\n by default. For more control, you can specify a customer managed key. For more information,\n see Protect Data\n at Rest Using Encryption.

\n

\n VPC configuration\n

\n

All traffic between the domain and the Amazon EFS volume is through the specified\n VPC and subnets. For other traffic, you can specify the AppNetworkAccessType\n parameter. AppNetworkAccessType corresponds to the network access type that you\n choose when you onboard to the domain. The following options are available:

  • PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker AI, which allows internet access. This is the default value.

  • VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway.

    When internet access is disabled, you won't be able to run an Amazon SageMaker AI Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker AI API and runtime or a NAT gateway and your security groups allow outbound connections.
\n \n

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch an Amazon SageMaker AI Studio app successfully.

\n
\n

For more information, see Connect Amazon SageMaker AI Studio Notebooks to Resources in a VPC.
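
A hedged Soto sketch of creating a domain whose non-EFS traffic stays inside the customer VPC; the initializer shape is assumed from Soto's generated API, and the ARNs and IDs are placeholders:

import SotoSageMaker

// VpcOnly keeps all traffic on the specified VPC and subnets.
let domainRequest = SageMaker.CreateDomainRequest(
    appNetworkAccessType: .vpcOnly,
    authMode: .iam,
    defaultUserSettings: .init(executionRole: "arn:aws:iam::123456789012:role/SageMakerRole"),
    domainName: "example-domain",
    subnetIds: ["subnet-0123456789abcdef0"],
    vpcId: "vpc-0123456789abcdef0"
)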

" } }, "com.amazonaws.sagemaker#CreateDomainRequest": { @@ -10742,7 +10920,7 @@ "AppNetworkAccessType": { "target": "com.amazonaws.sagemaker#AppNetworkAccessType", "traits": { - "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. The default value is\n PublicInternetOnly.

  • PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access

  • VpcOnly - All traffic is through the specified VPC and subnets
" + "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. The default value is\n PublicInternetOnly.

  • PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access

  • VpcOnly - All traffic is through the specified VPC and subnets
" } }, "HomeEfsFileSystemKmsKeyId": { @@ -10757,7 +10935,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

SageMaker uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to\n the domain with an Amazon Web Services managed key by default. For more control, specify a\n customer managed key.

" + "smithy.api#documentation": "

SageMaker AI uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to\n the domain with an Amazon Web Services managed key by default. For more control, specify a\n customer managed key.
                 }
             },
             "AppSecurityGroupManagement": {
@@ -11095,7 +11273,7 @@
             "ExecutionRoleArn": {
                 "target": "com.amazonaws.sagemaker#RoleArn",
                 "traits": {
-                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform actions on your behalf. For more information, see SageMaker Roles.</p> <note> <p>To be able to pass this role to Amazon SageMaker, the caller of this action must have the iam:PassRole permission.</p> </note>"
+                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform actions on your behalf. For more information, see SageMaker AI Roles.</p> <note> <p>To be able to pass this role to Amazon SageMaker AI, the caller of this action must have the iam:PassRole permission.</p> </note>"
                 }
             },
             "VpcConfig": {
@@ -11788,7 +11966,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker image.</p>"
+                "smithy.api#documentation": "<p>Creates a custom SageMaker AI image. A SageMaker AI image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker AI image.</p>"
             }
         },
         "com.amazonaws.sagemaker#CreateImageRequest": {
@@ -11818,7 +11996,7 @@
                 "target": "com.amazonaws.sagemaker#RoleArn",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The ARN of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.</p>",
+                    "smithy.api#documentation": "<p>The ARN of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf.</p>",
                     "smithy.api#required": {}
                 }
             },
@@ -11867,7 +12045,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage.</p>"
+                "smithy.api#documentation": "<p>Creates a version of the SageMaker AI image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage.</p>"
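
CreateImage and CreateImageVersion always travel together, so here is a hedged Soto sketch of the pair (image name, role, and ECR URI are placeholders; the client token is just an idempotency token):

```swift
import Foundation
import SotoSageMaker

let awsClient = AWSClient()
let sageMaker = SageMaker(client: awsClient, region: .useast1)

// 1. Register the image; it starts out as an empty set of versions.
_ = try await sageMaker.createImage(.init(
    displayName: "Team PyTorch image",
    imageName: "team-pytorch",
    roleArn: "arn:aws:iam::111122223333:role/SageMakerImageRole"
))

// 2. Attach a concrete Amazon ECR container image as the first version.
let version = try await sageMaker.createImageVersion(.init(
    baseImage: "111122223333.dkr.ecr.us-east-1.amazonaws.com/team-pytorch:latest",
    clientToken: UUID().uuidString,   // idempotency token
    imageName: "team-pytorch",
    jobType: .training                // declares job type compatibility, see the hunk below
))
print(version.imageVersionArn ?? "")
try await awsClient.shutdown()
```
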
             }
         },
         "com.amazonaws.sagemaker#CreateImageVersionRequest": {
@@ -11913,7 +12091,7 @@
             "JobType": {
                 "target": "com.amazonaws.sagemaker#JobType",
                 "traits": {
-                    "smithy.api#documentation": "<p>Indicates SageMaker job type compatibility.</p> <ul> <li> <p>TRAINING: The image version is compatible with SageMaker training jobs.</p> </li> <li> <p>INFERENCE: The image version is compatible with SageMaker inference jobs.</p> </li> <li> <p>NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels.</p> </li> </ul>"
+                    "smithy.api#documentation": "<p>Indicates SageMaker AI job type compatibility.</p> <ul> <li> <p>TRAINING: The image version is compatible with SageMaker AI training jobs.</p> </li> <li> <p>INFERENCE: The image version is compatible with SageMaker AI inference jobs.</p> </li> <li> <p>NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels.</p> </li> </ul>"
                 }
             },
             "MLFramework": {
@@ -11979,7 +12157,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Creates an inference component, which is a SageMaker hosting object that you can use to deploy a model to an endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action.</p>"
+                "smithy.api#documentation": "<p>Creates an inference component, which is a SageMaker AI hosting object that you can use to deploy a model to an endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action.</p>"
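
The resource-slicing idea is easier to read as a call. A sketch under the same Soto assumptions (endpoint, variant, and model names are placeholders; the compute numbers carve a share of the endpoint's instance out for this one model):

```swift
import SotoSageMaker

let awsClient = AWSClient()
let sageMaker = SageMaker(client: awsClient, region: .useast1)

let component = try await sageMaker.createInferenceComponent(.init(
    endpointName: "shared-endpoint",
    inferenceComponentName: "flan-t5-component",
    runtimeConfig: .init(copyCount: 1),      // one copy of the model to start with
    specification: .init(
        computeResourceRequirements: .init(
            minMemoryRequiredInMb: 2048,     // memory reserved for this model
            numberOfCpuCoresRequired: 2      // CPU cores reserved for this model
        ),
        modelName: "flan-t5-model"           // an existing SageMaker AI model object
    ),
    variantName: "AllTraffic"
))
print(component.inferenceComponentArn)
try await awsClient.shutdown()
```
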
             }
         },
         "com.amazonaws.sagemaker#CreateInferenceComponentInput": {
@@ -12573,7 +12751,7 @@
                 "target": "com.amazonaws.sagemaker#RoleArn",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.</p>",
+                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.</p>",
                     "smithy.api#required": {}
                 }
             },
@@ -12841,7 +13019,7 @@
                 "target": "com.amazonaws.sagemaker#RoleArn",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.</p>",
+                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.</p>",
                     "smithy.api#required": {}
                 }
             },
@@ -13207,7 +13385,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor.</p>"
+                "smithy.api#documentation": "<p>Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker AI Model Monitor.</p>"
             }
         },
         "com.amazonaws.sagemaker#CreateModelQualityJobDefinitionRequest": {
@@ -13267,7 +13445,7 @@
                 "target": "com.amazonaws.sagemaker#RoleArn",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.</p>",
+                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.</p>",
                     "smithy.api#required": {}
                 }
             },
@@ -13318,7 +13496,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an Amazon SageMaker Endpoint.</p>"
+                "smithy.api#documentation": "<p>Creates a schedule that regularly starts Amazon SageMaker AI Processing Jobs to monitor the data captured for an Amazon SageMaker AI Endpoint.</p>"
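
A hedged sketch of wiring a model quality job definition to an hourly schedule (the job definition name is a placeholder and is assumed to exist already; the cron syntax follows the EventBridge convention the service documents elsewhere):

```swift
import SotoSageMaker

let awsClient = AWSClient()
let sageMaker = SageMaker(client: awsClient, region: .useast1)

_ = try await sageMaker.createMonitoringSchedule(.init(
    monitoringScheduleConfig: .init(
        monitoringJobDefinitionName: "orders-model-quality-job",
        monitoringType: .modelQuality,
        scheduleConfig: .init(scheduleExpression: "cron(0 * ? * * *)")  // top of every hour
    ),
    monitoringScheduleName: "orders-hourly-monitor"
))
try await awsClient.shutdown()
```
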
             }
         },
         "com.amazonaws.sagemaker#CreateMonitoringScheduleRequest": {
@@ -13381,7 +13559,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Creates an SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook.</p> <p>In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.</p> <p>SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker with a specific algorithm or with a machine learning framework.</p> <p>After receiving the request, SageMaker does the following:</p> <ol> <li> <p>Creates a network interface in the SageMaker VPC.</p> </li> <li> <p>(Option) If you specified SubnetId, SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.</p> </li> <li> <p>Launches an EC2 instance of the type specified in the request in the SageMaker VPC. If you specified SubnetId of your VPC, SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.</p> </li> </ol> <p>After creating the notebook instance, SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it.</p> <p>After SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker endpoints, and validate hosted models.</p> <p>For more information, see How It Works.</p>"
+                "smithy.api#documentation": "<p>Creates an SageMaker AI notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook.</p> <p>In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. SageMaker AI launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.</p> <p>SageMaker AI also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker AI with a specific algorithm or with a machine learning framework.</p> <p>After receiving the request, SageMaker AI does the following:</p> <ol> <li> <p>Creates a network interface in the SageMaker AI VPC.</p> </li> <li> <p>(Option) If you specified SubnetId, SageMaker AI creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker AI attaches the security group that you specified in the request to the network interface that it creates in your VPC.</p> </li> <li> <p>Launches an EC2 instance of the type specified in the request in the SageMaker AI VPC. If you specified SubnetId of your VPC, SageMaker AI specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.</p> </li> </ol> <p>After creating the notebook instance, SageMaker AI returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it.</p> <p>After SageMaker AI creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker AI endpoints, and validate hosted models.</p> <p>For more information, see How It Works.</p>"
             }
         },
         "com.amazonaws.sagemaker#CreateNotebookInstanceInput": {
@@ -13419,14 +13597,14 @@
                 "target": "com.amazonaws.sagemaker#RoleArn",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker Roles.</p> <note> <p>To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission.</p> </note>",
+                    "smithy.api#documentation": "<p>When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker AI assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker AI can perform these tasks. The policy must allow the SageMaker AI service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker AI Roles.</p> <note> <p>To be able to pass this role to SageMaker AI, the caller of this API must have the iam:PassRole permission.</p> </note>",
                     "smithy.api#required": {}
                 }
             },
             "KmsKeyId": {
                 "target": "com.amazonaws.sagemaker#KmsKeyId",
                 "traits": {
-                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide.</p>"
+                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide.</p>"
                 }
             },
             "Tags": {
@@ -13444,7 +13622,7 @@
             "DirectInternetAccess": {
                 "target": "com.amazonaws.sagemaker#DirectInternetAccess",
                 "traits": {
-                    "smithy.api#documentation": "<p>Sets whether SageMaker provides internet access to the notebook instance. If you set this to Disabled this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC.</p> <p>For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter.</p>"
+                    "smithy.api#documentation": "<p>Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to Disabled this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker AI training and endpoint services unless you configure a NAT Gateway in your VPC.</p> <p>For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter.</p>"
                 }
             },
             "VolumeSizeInGB": {
@@ -13462,13 +13640,13 @@
             "DefaultCodeRepository": {
                 "target": "com.amazonaws.sagemaker#CodeRepositoryNameOrUrl",
                 "traits": {
-                    "smithy.api#documentation": "<p>A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances.</p>"
+                    "smithy.api#documentation": "<p>A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances.</p>"
                 }
             },
             "AdditionalCodeRepositories": {
                 "target": "com.amazonaws.sagemaker#AdditionalCodeRepositoryNamesOrUrls",
                 "traits": {
-                    "smithy.api#documentation": "<p>An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances.</p>"
+                    "smithy.api#documentation": "<p>An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances.</p>"
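
Pulling the members above together, a hedged sketch of a VPC-attached notebook instance with direct internet access disabled (all identifiers are placeholders; note that Disabled is only valid when subnetId is set):

```swift
import SotoSageMaker

let awsClient = AWSClient()
let sageMaker = SageMaker(client: awsClient, region: .useast1)

let notebook = try await sageMaker.createNotebookInstance(.init(
    defaultCodeRepository: "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/experiments",
    directInternetAccess: .disabled,   // traffic must go through your VPC (NAT or endpoints)
    instanceType: .mlT3Medium,
    kmsKeyId: "1234abcd-12ab-34cd-56ef-1234567890ab",
    notebookInstanceName: "research-notebook",
    roleArn: "arn:aws:iam::111122223333:role/SageMakerNotebookRole",
    securityGroupIds: ["sg-0abc12345"],
    subnetId: "subnet-0abc1234",
    volumeSizeInGB: 50
))
print(notebook.notebookInstanceArn ?? "")
try await awsClient.shutdown()
```
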
             }
         },
         "RootAccess": {
@@ -13602,7 +13780,7 @@
                 "target": "com.amazonaws.sagemaker#RoleArn",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.</p> <p>During model optimization, Amazon SageMaker needs your permission to:</p> <ul> <li> <p>Read input data from an S3 bucket</p> </li> <li> <p>Write model artifacts to an S3 bucket</p> </li> <li> <p>Write logs to Amazon CloudWatch Logs</p> </li> <li> <p>Publish metrics to Amazon CloudWatch</p> </li> </ul> <p>You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles.</p>",
+                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf.</p> <p>During model optimization, Amazon SageMaker AI needs your permission to:</p> <ul> <li> <p>Read input data from an S3 bucket</p> </li> <li> <p>Write model artifacts to an S3 bucket</p> </li> <li> <p>Write logs to Amazon CloudWatch Logs</p> </li> <li> <p>Publish metrics to Amazon CloudWatch</p> </li> </ul> <p>You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker AI Roles.</p>",
                     "smithy.api#required": {}
                 }
             },
@@ -13976,7 +14154,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM.</p> <p>The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.</p> <p>You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint.</p> <note> <ul> <li> <p>The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.</p> </li> <li> <p>The JupyterLab session default expiration time is 12 hours. You can configure this value using SessionExpirationDurationInSeconds.</p> </li> </ul> </note>"
+                "smithy.api#documentation": "<p>Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM.</p> <p>The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.</p> <p>You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker AI Studio Through an Interface VPC Endpoint.</p> <note> <ul> <li> <p>The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.</p> </li> <li> <p>The JupyterLab session default expiration time is 12 hours. You can configure this value using SessionExpirationDurationInSeconds.</p> </li> </ul> </note>"
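
Both timeouts called out above are plain request parameters. A sketch under the same Soto assumptions (the domain ID and profile name are placeholders):

```swift
import SotoSageMaker

let awsClient = AWSClient()
let sageMaker = SageMaker(client: awsClient, region: .useast1)

let presigned = try await sageMaker.createPresignedDomainUrl(.init(
    domainId: "d-xxxxxxxxxxxx",
    expiresInSeconds: 300,                      // the URL itself; 5 minutes is the default
    sessionExpirationDurationInSeconds: 43200,  // the JupyterLab session; 12 hours is the default
    userProfileName: "data-scientist-1"
))
print(presigned.authorizedUrl ?? "")
try await awsClient.shutdown()
```
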
             }
         },
         "com.amazonaws.sagemaker#CreatePresignedDomainUrlRequest": {
@@ -14109,7 +14287,7 @@
                 "target": "com.amazonaws.sagemaker#CreatePresignedNotebookInstanceUrlOutput"
             },
             "traits": {
-                "smithy.api#documentation": "<p>Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker console, when you choose Open next to a notebook instance, SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.</p> <p>The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance.</p> <p>You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.</p> <note> <p>The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page.</p> </note>"
+                "smithy.api#documentation": "<p>Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker AI console, when you choose Open next to a notebook instance, SageMaker AI opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.</p> <p>The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance.</p> <p>You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.</p> <note> <p>The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page.</p> </note>"
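
The notebook-instance variant has the same shape. A sketch:

```swift
import SotoSageMaker

let awsClient = AWSClient()
let sageMaker = SageMaker(client: awsClient, region: .useast1)

let url = try await sageMaker.createPresignedNotebookInstanceUrl(.init(
    notebookInstanceName: "research-notebook",
    sessionExpirationDurationInSeconds: 1800
))
print(url.authorizedUrl ?? "")  // usable for 5 minutes once issued
try await awsClient.shutdown()
```
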
             }
         },
         "com.amazonaws.sagemaker#CreatePresignedNotebookInstanceUrlInput": {
@@ -14445,7 +14623,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Creates a new Amazon SageMaker Studio Lifecycle Configuration.</p>"
+                "smithy.api#documentation": "<p>Creates a new Amazon SageMaker AI Studio Lifecycle Configuration.</p>"
             }
         },
         "com.amazonaws.sagemaker#CreateStudioLifecycleConfigRequest": {
@@ -14455,7 +14633,7 @@
                 "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The name of the Amazon SageMaker Studio Lifecycle Configuration to create.</p>",
+                    "smithy.api#documentation": "<p>The name of the Amazon SageMaker AI Studio Lifecycle Configuration to create.</p>",
                     "smithy.api#required": {}
                 }
             },
@@ -14463,7 +14641,7 @@
                 "target": "com.amazonaws.sagemaker#StudioLifecycleConfigContent",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The content of your Amazon SageMaker Studio Lifecycle Configuration script. This content must be base64 encoded.</p>",
+                    "smithy.api#documentation": "<p>The content of your Amazon SageMaker AI Studio Lifecycle Configuration script. This content must be base64 encoded.</p>",
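
The base64 requirement is the part that trips people up: the script body itself, not a file path, is what gets encoded. A sketch:

```swift
import Foundation
import SotoSageMaker

let awsClient = AWSClient()
let sageMaker = SageMaker(client: awsClient, region: .useast1)

let script = """
#!/bin/bash
set -eux
pip install --upgrade pandas
"""

_ = try await sageMaker.createStudioLifecycleConfig(.init(
    studioLifecycleConfigAppType: .jupyterLab,
    studioLifecycleConfigContent: Data(script.utf8).base64EncodedString(),
    studioLifecycleConfigName: "install-pandas"
))
try await awsClient.shutdown()
```
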
                     "smithy.api#required": {}
                 }
             },
@@ -15380,7 +15558,7 @@
                 }
             },
             "traits": {
-                "smithy.api#documentation": "<p>A file system, created by you, that you assign to a user profile or space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio.</p>"
+                "smithy.api#documentation": "<p>A file system, created by you, that you assign to a user profile or space for an Amazon SageMaker AI Domain. Permitted users can access this file system in Amazon SageMaker AI Studio.</p>"
             }
         },
         "com.amazonaws.sagemaker#CustomFileSystemConfig": {
@@ -15400,7 +15578,7 @@
                 }
             },
             "traits": {
-                "smithy.api#documentation": "<p>The settings for assigning a custom file system to a user profile or space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio.</p>"
+                "smithy.api#documentation": "<p>The settings for assigning a custom file system to a user profile or space for an Amazon SageMaker AI Domain. Permitted users can access this file system in Amazon SageMaker AI Studio.</p>"
             }
         },
         "com.amazonaws.sagemaker#CustomFileSystemConfigs": {
@@ -15454,7 +15632,7 @@
                 }
             },
             "traits": {
-                "smithy.api#documentation": "<p>A custom SageMaker image. For more information, see Bring your own SageMaker image.</p>"
+                "smithy.api#documentation": "<p>A custom SageMaker AI image. For more information, see Bring your own SageMaker AI image.</p>"
             }
         },
         "com.amazonaws.sagemaker#CustomImageContainerArguments": {
@@ -15612,7 +15790,7 @@
                 "target": "com.amazonaws.sagemaker#SamplingPercentage",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The percentage of requests SageMaker will capture. A lower value is recommended for Endpoints with high traffic.</p>",
+                    "smithy.api#documentation": "<p>The percentage of requests SageMaker AI will capture. A lower value is recommended for Endpoints with high traffic.</p>",
                     "smithy.api#required": {}
                 }
             },
@@ -15627,7 +15805,7 @@
             "KmsKeyId": {
                 "target": "com.amazonaws.sagemaker#KmsKeyId",
                 "traits": {
-                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an Key Management Service key that SageMaker uses to encrypt the captured data at rest using Amazon S3 server-side encryption.</p> <p>The KmsKeyId can be any of the following formats:</p> <ul> <li> <p>Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab</p> </li> <li> <p>Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</p> </li> <li> <p>Alias name: alias/ExampleAlias</p> </li> <li> <p>Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias</p> </li> </ul>"
+                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an Key Management Service key that SageMaker AI uses to encrypt the captured data at rest using Amazon S3 server-side encryption.</p> <p>The KmsKeyId can be any of the following formats:</p> <ul> <li> <p>Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab</p> </li> <li> <p>Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</p> </li> <li> <p>Alias name: alias/ExampleAlias</p> </li> <li> <p>Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias</p> </li> </ul>"
                 }
             },
             "CaptureOptions": {
@@ -15641,12 +15819,12 @@
             "CaptureContentTypeHeader": {
                 "target": "com.amazonaws.sagemaker#CaptureContentTypeHeader",
                 "traits": {
-                    "smithy.api#documentation": "<p>Configuration specifying how to treat different headers. If no headers are specified SageMaker will by default base64 encode when capturing the data.</p>"
+                    "smithy.api#documentation": "<p>Configuration specifying how to treat different headers. If no headers are specified SageMaker AI will by default base64 encode when capturing the data.</p>"
                 }
             }
         },
         "traits": {
-            "smithy.api#documentation": "<p>Configuration to control how SageMaker captures inference data.</p>"
+            "smithy.api#documentation": "<p>Configuration to control how SageMaker AI captures inference data.</p>"
             }
         },
         "com.amazonaws.sagemaker#DataCaptureConfigSummary": {
@@ -16185,7 +16363,7 @@
             "CustomFileSystemConfigs": {
                 "target": "com.amazonaws.sagemaker#CustomFileSystemConfigs",
                 "traits": {
-                    "smithy.api#documentation": "<p>The settings for assigning a custom file system to a domain. Permitted users can access this file system in Amazon SageMaker Studio.</p>"
+                    "smithy.api#documentation": "<p>The settings for assigning a custom file system to a domain. Permitted users can access this file system in Amazon SageMaker AI Studio.</p>"
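
Assembling the capture members above into the shape they belong to (the bucket and key are placeholders; 20 percent sampling keeps overhead low on busy endpoints):

```swift
import SotoSageMaker

// Passed as the dataCaptureConfig member of a CreateEndpointConfig call.
let dataCapture = SageMaker.DataCaptureConfig(
    captureContentTypeHeader: .init(jsonContentTypes: ["application/json"]),  // keep JSON readable instead of base64
    captureOptions: [.init(captureMode: .input), .init(captureMode: .output)],
    destinationS3Uri: "s3://amzn-s3-demo-bucket/endpoint-capture/",
    enableCapture: true,
    initialSamplingPercentage: 20,
    kmsKeyId: "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"
)
```
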
                 }
             }
         },
@@ -16632,7 +16810,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Deletes the specified compilation job. This action deletes only the compilation job resource in Amazon SageMaker. It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, the compiled model, or the IAM role.</p> <p>You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes STOPPED.</p>"
+                "smithy.api#documentation": "<p>Deletes the specified compilation job. This action deletes only the compilation job resource in Amazon SageMaker AI. It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, the compiled model, or the IAM role.</p> <p>You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes STOPPED.</p>"
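
The status rule above, written out as a guard (the job name is a placeholder; a real caller would poll after stopping rather than deleting immediately):

```swift
import SotoSageMaker

let awsClient = AWSClient()
let sageMaker = SageMaker(client: awsClient, region: .useast1)

let jobName = "resnet50-edge-compile"
let job = try await sageMaker.describeCompilationJob(.init(compilationJobName: jobName))
switch job.compilationJobStatus {
case .completed, .failed, .stopped:
    try await sageMaker.deleteCompilationJob(.init(compilationJobName: jobName))
case .starting, .inprogress:
    try await sageMaker.stopCompilationJob(.init(compilationJobName: jobName))
    // poll describeCompilationJob until the status is STOPPED, then delete
default:
    break  // STOPPING: wait for STOPPED before deleting
}
try await awsClient.shutdown()
```
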
             }
         },
         "com.amazonaws.sagemaker#DeleteCompilationJobRequest": {
@@ -17322,7 +17500,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Deletes a SageMaker image and all versions of the image. The container images aren't deleted.</p>"
+                "smithy.api#documentation": "<p>Deletes a SageMaker AI image and all versions of the image. The container images aren't deleted.</p>"
             }
         },
         "com.amazonaws.sagemaker#DeleteImageRequest": {
@@ -17365,7 +17543,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Deletes a version of a SageMaker image. The container image the version represents isn't deleted.</p>"
+                "smithy.api#documentation": "<p>Deletes a version of a SageMaker AI image. The container image the version represents isn't deleted.</p>"
             }
         },
         "com.amazonaws.sagemaker#DeleteImageVersionRequest": {
@@ -17556,7 +17734,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Deletes an Amazon SageMaker model bias job definition.</p>"
+                "smithy.api#documentation": "<p>Deletes an Amazon SageMaker AI model bias job definition.</p>"
             }
         },
         "com.amazonaws.sagemaker#DeleteModelBiasJobDefinitionRequest": {
@@ -17625,7 +17803,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Deletes an Amazon SageMaker model explainability job definition.</p>"
+                "smithy.api#documentation": "<p>Deletes an Amazon SageMaker AI model explainability job definition.</p>"
             }
         },
         "com.amazonaws.sagemaker#DeleteModelExplainabilityJobDefinitionRequest": {
@@ -17829,7 +18007,7 @@
                 "target": "smithy.api#Unit"
             },
             "traits": {
-                "smithy.api#documentation": "<p>Deletes an SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API.</p> <note> <p>When you delete a notebook instance, you lose all of your data. SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance.</p> </note>"
+                "smithy.api#documentation": "<p>Deletes an SageMaker AI notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API.</p> <note> <p>When you delete a notebook instance, you lose all of your data. SageMaker AI removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance.</p> </note>"
             }
         },
         "com.amazonaws.sagemaker#DeleteNotebookInstanceInput": {
@@ -17839,7 +18017,7 @@
                 "target": "com.amazonaws.sagemaker#NotebookInstanceName",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The name of the SageMaker notebook instance to delete.</p>",
+                    "smithy.api#documentation": "<p>The name of the SageMaker AI notebook instance to delete.</p>",
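
The required StopNotebookInstance-then-delete order, as a sketch (the instance name is a placeholder):

```swift
import SotoSageMaker

let awsClient = AWSClient()
let sageMaker = SageMaker(client: awsClient, region: .useast1)

let name = "research-notebook"
try await sageMaker.stopNotebookInstance(.init(notebookInstanceName: name))
// In real code, poll describeNotebookInstance until the status is Stopped;
// deletion also discards the ML storage volume, so back up anything you need first.
try await sageMaker.deleteNotebookInstance(.init(notebookInstanceName: name))
try await awsClient.shutdown()
```
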
                    "smithy.api#required": {}
                 }
             }
@@ -18119,7 +18297,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Deletes the Amazon SageMaker Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles.</p>"
+                "smithy.api#documentation": "<p>Deletes the Amazon SageMaker AI Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles.</p>"
             }
         },
         "com.amazonaws.sagemaker#DeleteStudioLifecycleConfigRequest": {
@@ -18129,7 +18307,7 @@
                 "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The name of the Amazon SageMaker Studio Lifecycle Configuration to delete.</p>",
+                    "smithy.api#documentation": "<p>The name of the Amazon SageMaker AI Studio Lifecycle Configuration to delete.</p>",
                     "smithy.api#required": {}
                 }
             }
@@ -19065,13 +19243,13 @@
             "LastUserActivityTimestamp": {
                 "target": "com.amazonaws.sagemaker#Timestamp",
                 "traits": {
-                    "smithy.api#documentation": "<p>The timestamp of the last user's activity. LastUserActivityTimestamp is also updated when SageMaker performs health checks without user activity. As a result, this value is set to the same value as LastHealthCheckTimestamp.</p>"
+                    "smithy.api#documentation": "<p>The timestamp of the last user's activity. LastUserActivityTimestamp is also updated when SageMaker AI performs health checks without user activity. As a result, this value is set to the same value as LastHealthCheckTimestamp.</p>"
                 }
             },
             "CreationTime": {
                 "target": "com.amazonaws.sagemaker#Timestamp",
                 "traits": {
-                    "smithy.api#documentation": "<p>The creation time of the application.</p> <note> <p>After an application has been shut down for 24 hours, SageMaker deletes all metadata for the application. To be considered an update and retain application metadata, applications must be restarted within 24 hours after the previous application has been shut down. After this time window, creation of an application is considered a new application rather than an update of the previous application.</p> </note>"
+                    "smithy.api#documentation": "<p>The creation time of the application.</p> <note> <p>After an application has been shut down for 24 hours, SageMaker AI deletes all metadata for the application. To be considered an update and retain application metadata, applications must be restarted within 24 hours after the previous application has been shut down. After this time window, creation of an application is considered a new application rather than an update of the previous application.</p> </note>"
                 }
             },
             "FailureReason": {
@@ -19083,7 +19261,7 @@
             "ResourceSpec": {
                 "target": "com.amazonaws.sagemaker#ResourceSpec",
                 "traits": {
-                    "smithy.api#documentation": "<p>The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.</p>"
+                    "smithy.api#documentation": "<p>The instance type and the Amazon Resource Name (ARN) of the SageMaker AI image created on the instance.</p>"
                 }
             },
             "BuiltInLifecycleConfigArn": {
@@ -19326,7 +19504,7 @@
             "BestCandidate": {
                 "target": "com.amazonaws.sagemaker#AutoMLCandidate",
                 "traits": {
-                    "smithy.api#documentation": "<p>The best model candidate selected by SageMaker Autopilot using both the best objective metric and lowest InferenceLatency for an experiment.</p>"
+                    "smithy.api#documentation": "<p>The best model candidate selected by SageMaker AI Autopilot using both the best objective metric and lowest InferenceLatency for an experiment.</p>"
                 }
             },
             "AutoMLJobStatus": {
@@ -20003,14 +20181,14 @@
             "CompilationEndTime": {
                 "target": "com.amazonaws.sagemaker#Timestamp",
                 "traits": {
-                    "smithy.api#documentation": "<p>The time when the model compilation job on a compilation job instance ended. For a successful or stopped job, this is when the job's model artifacts have finished uploading. For a failed job, this is when Amazon SageMaker detected that the job failed.</p>"
+                    "smithy.api#documentation": "<p>The time when the model compilation job on a compilation job instance ended. For a successful or stopped job, this is when the job's model artifacts have finished uploading. For a failed job, this is when Amazon SageMaker AI detected that the job failed.</p>"
                 }
             },
             "StoppingCondition": {
                 "target": "com.amazonaws.sagemaker#StoppingCondition",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs.</p>",
+                    "smithy.api#documentation": "<p>Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker AI ends the compilation job. Use this API to cap model training costs.</p>",
                     "smithy.api#required": {}
                 }
             },
@@ -20068,7 +20246,7 @@
                 "target": "com.amazonaws.sagemaker#RoleArn",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker assumes to perform the model compilation job.</p>",
+                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI assumes to perform the model compilation job.</p>",
                     "smithy.api#required": {}
                 }
             },
@@ -20457,7 +20635,7 @@
                 "target": "com.amazonaws.sagemaker#RoleArn",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.</p>",
+                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.</p>",
                     "smithy.api#required": {}
                 }
             },
@@ -20764,7 +20942,7 @@
             "SingleSignOnApplicationArn": {
                 "target": "com.amazonaws.sagemaker#SingleSignOnApplicationArn",
                 "traits": {
-                    "smithy.api#documentation": "<p>The ARN of the application managed by SageMaker in IAM Identity Center. This value is only returned for domains created after October 1, 2023.</p>"
+                    "smithy.api#documentation": "<p>The ARN of the application managed by SageMaker AI in IAM Identity Center. This value is only returned for domains created after October 1, 2023.</p>"
                 }
             },
             "Status": {
@@ -20818,7 +20996,7 @@
             "AppNetworkAccessType": {
                 "target": "com.amazonaws.sagemaker#AppNetworkAccessType",
                 "traits": {
-                    "smithy.api#documentation": "<p>Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly.</p> <ul> <li> <p>PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access</p> </li> <li> <p>VpcOnly - All traffic is through the specified VPC and subnets</p> </li> </ul>"
+                    "smithy.api#documentation": "<p>Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly.</p> <ul> <li> <p>PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access</p> </li> <li> <p>VpcOnly - All traffic is through the specified VPC and subnets</p> </li> </ul>"
                 }
             },
             "HomeEfsFileSystemKmsKeyId": {
@@ -22480,7 +22658,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Describes a SageMaker image.</p>",
+                "smithy.api#documentation": "<p>Describes a SageMaker AI image.</p>",
                 "smithy.api#suppress": [
                     "WaitableTraitInvalidErrorType"
                 ],
@@ -22647,7 +22825,7 @@
             "RoleArn": {
                 "target": "com.amazonaws.sagemaker#RoleArn",
                 "traits": {
-                    "smithy.api#documentation": "<p>The ARN of the IAM role that enables Amazon SageMaker to perform tasks on your behalf.</p>"
+                    "smithy.api#documentation": "<p>The ARN of the IAM role that enables Amazon SageMaker AI to perform tasks on your behalf.</p>"
                 }
             }
         },
@@ -22669,7 +22847,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Describes a version of a SageMaker image.</p>",
+                "smithy.api#documentation": "<p>Describes a version of a SageMaker AI image.</p>",
                 "smithy.api#suppress": [
                     "WaitableTraitInvalidErrorType"
                 ],
@@ -22829,7 +23007,7 @@
             "JobType": {
                 "target": "com.amazonaws.sagemaker#JobType",
                 "traits": {
-                    "smithy.api#documentation": "<p>Indicates SageMaker job type compatibility.</p> <ul> <li> <p>TRAINING: The image version is compatible with SageMaker training jobs.</p> </li> <li> <p>INFERENCE: The image version is compatible with SageMaker inference jobs.</p> </li> <li> <p>NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels.</p> </li> </ul>"
+                    "smithy.api#documentation": "<p>Indicates SageMaker AI job type compatibility.</p> <ul> <li> <p>TRAINING: The image version is compatible with SageMaker AI training jobs.</p> </li> <li> <p>INFERENCE: The image version is compatible with SageMaker AI inference jobs.</p> </li> <li> <p>NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels.</p> </li> </ul>"
                 }
             },
             "MLFramework": {
@@ -24632,7 +24810,7 @@
                 "target": "com.amazonaws.sagemaker#RoleArn",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.</p>",
+                    "smithy.api#documentation": "<p>The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf.</p>",
                     "smithy.api#required": {}
                 }
             },
@@ -24945,7 +25123,7 @@
             "NotebookInstanceName": {
                 "target": "com.amazonaws.sagemaker#NotebookInstanceName",
                 "traits": {
-                    "smithy.api#documentation": "<p>The name of the SageMaker notebook instance.</p>"
+                    "smithy.api#documentation": "<p>The name of the SageMaker AI notebook instance.</p>"
                 }
             },
             "NotebookInstanceStatus": {
@@ -24993,13 +25171,13 @@
             "KmsKeyId": {
                 "target": "com.amazonaws.sagemaker#KmsKeyId",
                 "traits": {
-                    "smithy.api#documentation": "<p>The Amazon Web Services KMS key ID SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.</p>"
+                    "smithy.api#documentation": "<p>The Amazon Web Services KMS key ID SageMaker AI uses to encrypt data when storing it on the ML storage volume attached to the instance.</p>"
                 }
             },
             "NetworkInterfaceId": {
                 "target": "com.amazonaws.sagemaker#NetworkInterfaceId",
                 "traits": {
-                    "smithy.api#documentation": "<p>The network interface IDs that SageMaker created at the time of creating the instance.</p>"
+                    "smithy.api#documentation": "<p>The network interface IDs that SageMaker AI created at the time of creating the instance.</p>"
                 }
             },
             "LastModifiedTime": {
@@ -25023,7 +25201,7 @@
             "DirectInternetAccess": {
                 "target": "com.amazonaws.sagemaker#DirectInternetAccess",
                 "traits": {
-                    "smithy.api#documentation": "<p>Describes whether SageMaker provides internet access to the notebook instance. If this value is set to Disabled, the notebook instance does not have internet access, and cannot connect to SageMaker training and endpoint services.</p> <p>For more information, see Notebook Instances Are Internet-Enabled by Default.</p>"
+                    "smithy.api#documentation": "<p>Describes whether SageMaker AI provides internet access to the notebook instance. If this value is set to Disabled, the notebook instance does not have internet access, and cannot connect to SageMaker AI training and endpoint services.</p> <p>For more information, see Notebook Instances Are Internet-Enabled by Default.</p>"
                 }
             },
             "VolumeSizeInGB": {
@@ -25041,13 +25219,13 @@
             "DefaultCodeRepository": {
                 "target": "com.amazonaws.sagemaker#CodeRepositoryNameOrUrl",
                 "traits": {
-                    "smithy.api#documentation": "<p>The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances.</p>"
+                    "smithy.api#documentation": "<p>The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances.</p>"
                 }
             },
             "AdditionalCodeRepositories": {
                 "target": "com.amazonaws.sagemaker#AdditionalCodeRepositoryNamesOrUrls",
                 "traits": {
-                    "smithy.api#documentation": "<p>An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances.</p>"
+                    "smithy.api#documentation": "<p>An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances.</p>"
                 }
             },
             "RootAccess": {
@@ -26112,7 +26290,7 @@
                 }
             ],
             "traits": {
-                "smithy.api#documentation": "<p>Describes the Amazon SageMaker Studio Lifecycle Configuration.</p>"
+                "smithy.api#documentation": "<p>Describes the Amazon SageMaker AI Studio Lifecycle Configuration.</p>"
             }
         },
         "com.amazonaws.sagemaker#DescribeStudioLifecycleConfigRequest": {
@@ -26122,7 +26300,7 @@
                 "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName",
                 "traits": {
                     "smithy.api#clientOptional": {},
-                    "smithy.api#documentation": "<p>The name of the Amazon SageMaker Studio Lifecycle Configuration to describe.</p>",
+                    "smithy.api#documentation": "<p>The name of the Amazon SageMaker AI Studio Lifecycle Configuration to describe.</p>",
                     "smithy.api#required": {}
                 }
             }
@@ -26143,25 +26321,25 @@
             "StudioLifecycleConfigName": {
                 "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName",
                 "traits": {
-                    "smithy.api#documentation": "<p>The name of the Amazon SageMaker Studio Lifecycle Configuration that is described.</p>"
+                    "smithy.api#documentation": "<p>The name of the Amazon SageMaker AI Studio Lifecycle Configuration that is described.</p>"
                 }
             },
             "CreationTime": {
                 "target": "com.amazonaws.sagemaker#Timestamp",
                 "traits": {
-                    "smithy.api#documentation": "<p>The creation time of the Amazon SageMaker Studio Lifecycle Configuration.</p>"
+                    "smithy.api#documentation": "<p>The creation time of the Amazon SageMaker AI Studio Lifecycle Configuration.</p>"
                 }
             },
             "LastModifiedTime": {
                 "target": "com.amazonaws.sagemaker#Timestamp",
                 "traits": {
-                    "smithy.api#documentation": "<p>This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle Configurations are immutable.</p>"
+                    "smithy.api#documentation": "<p>This value is equivalent to CreationTime because Amazon SageMaker AI Studio Lifecycle Configurations are immutable.</p>"
                 }
             },
             "StudioLifecycleConfigContent": {
                 "target": "com.amazonaws.sagemaker#StudioLifecycleConfigContent",
                 "traits": {
-                    "smithy.api#documentation": "<p>The content of your Amazon SageMaker Studio Lifecycle Configuration script.</p>"
+                    "smithy.api#documentation": "<p>The content of your Amazon SageMaker AI Studio Lifecycle Configuration script.</p>"
                 }
             },
             "StudioLifecycleConfigAppType": {
@@ -28239,7 +28417,7 @@
             "ExecutionRoleIdentityConfig": {
                 "target": "com.amazonaws.sagemaker#ExecutionRoleIdentityConfig",
                 "traits": {
-                    "smithy.api#documentation": "<p>The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key.</p>"
+                    "smithy.api#documentation": "<p>The configuration for attaching a SageMaker AI user profile name to the execution role as a sts:SourceIdentity key.</p>"
                 }
             },
             "DockerSettings": {
@@ -28271,7 +28449,7 @@
             "ExecutionRoleIdentityConfig": {
                 "target": "com.amazonaws.sagemaker#ExecutionRoleIdentityConfig",
                 "traits": {
-                    "smithy.api#documentation": "<p>The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key. This configuration can only be modified if there are no apps in the InService or Pending state.</p>"
+                    "smithy.api#documentation": "<p>The configuration for attaching a SageMaker AI user profile name to the execution role as a sts:SourceIdentity key. This configuration can only be modified if there are no apps in the InService or Pending state.</p>"
                 }
             },
             "SecurityGroupIds": {
@@ -28519,7 +28697,7 @@
                 }
             },
             "traits": {
-                "smithy.api#documentation": "<p>A file system, created by you in Amazon EFS, that you assign to a user profile or space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio.</p>"
+                "smithy.api#documentation": "<p>A file system, created by you in Amazon EFS, that you assign to a user profile or space for an Amazon SageMaker AI Domain. Permitted users can access this file system in Amazon SageMaker AI Studio.</p>"
             }
         },
         "com.amazonaws.sagemaker#EFSFileSystemConfig": {
@@ -28536,12 +28714,12 @@
             "FileSystemPath": {
                 "target": "com.amazonaws.sagemaker#FileSystemPath",
                 "traits": {
-                    "smithy.api#documentation": "<p>The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below.</p>"
+                    "smithy.api#documentation": "<p>The path to the file system directory that is accessible in Amazon SageMaker AI Studio. Permitted users can access only this directory and below.</p>"
                 }
             }
         },
         "traits": {
-            "smithy.api#documentation": "<p>The settings for assigning a custom Amazon EFS file system to a user profile or space for an Amazon SageMaker Domain.</p>"
+            "smithy.api#documentation": "<p>The settings for assigning a custom Amazon EFS file system to a user profile or space for an Amazon SageMaker AI Domain.</p>"
             }
         },
         "com.amazonaws.sagemaker#EMRStepMetadata": {
@@ -31113,7 +31291,7 @@
                 }
             },
             "traits": {
-                "smithy.api#documentation": "<p>The Amazon Elastic File System storage configuration for a SageMaker image.</p>"
+                "smithy.api#documentation": "<p>The Amazon Elastic File System storage configuration for a SageMaker AI image.</p>"
             }
         },
         "com.amazonaws.sagemaker#FileSystemDataSource": {
@@ -34425,7 +34603,7 @@
                 }
             },
             "traits": {
-                "smithy.api#documentation": "<p>A SageMaker image. A SageMaker image represents a set of container images that are derived from a common base container image. Each of these container images is represented by a SageMaker ImageVersion.</p>"
+                "smithy.api#documentation": "<p>A SageMaker AI image. A SageMaker AI image represents a set of container images that are derived from a common base container image. Each of these container images is represented by a SageMaker AI ImageVersion.</p>"
             }
         },
         "com.amazonaws.sagemaker#ImageArn": {
@@ -34721,7 +34899,7 @@
                 }
             },
             "traits": {
-                "smithy.api#documentation": "<p>A version of a SageMaker Image. A version represents an existing container image.</p>"
+                "smithy.api#documentation": "<p>A version of a SageMaker AI Image. A version represents an existing container image.</p>"
             }
         },
         "com.amazonaws.sagemaker#ImageVersionAlias": {
@@ -35173,7 +35351,7 @@
             "ModelName": {
                 "target": "com.amazonaws.sagemaker#ModelName",
                 "traits": {
-                    "smithy.api#documentation": "<p>The name of an existing SageMaker model object in your account that you want to deploy with the inference component.</p>"
+                    "smithy.api#documentation": "<p>The name of an existing SageMaker AI model object in your account that you want to deploy with the inference component.</p>"
                 }
             },
             "Container": {
@@ -35211,7 +35389,7 @@
             "ModelName": {
                 "target": "com.amazonaws.sagemaker#ModelName",
                 "traits": {
-                    "smithy.api#documentation": "<p>The name of the SageMaker model object that is deployed with the inference component.</p>"
+                    "smithy.api#documentation": "<p>The name of the SageMaker AI model object that is deployed with the inference component.</p>"
                 }
             },
             "Container": {
@@ -37507,7 +37685,7 @@
                 }
             },
             "traits": {
-                "smithy.api#documentation": "<p>The configuration for the file system and kernels in a SageMaker image running as a JupyterLab app. The FileSystemConfig object is not supported.</p>"
+                "smithy.api#documentation": "<p>The configuration for the file system and kernels in a SageMaker AI image running as a JupyterLab app. The FileSystemConfig object is not supported.</p>"
             }
         },
         "com.amazonaws.sagemaker#JupyterLabAppSettings": {
@@ -37563,7 +37741,7 @@
             "DefaultResourceSpec": {
                 "target": "com.amazonaws.sagemaker#ResourceSpec",
                 "traits": {
-                    "smithy.api#documentation": "<p>The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the LifecycleConfigArns parameter, then this parameter is also required.</p>"
+                    "smithy.api#documentation": "<p>The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the JupyterServer app. If you use the LifecycleConfigArns parameter, then this parameter is also required.</p>"
                 }
             },
             "LifecycleConfigArns": {
@@ -37575,7 +37753,7 @@
             "CodeRepositories": {
                 "target": "com.amazonaws.sagemaker#CodeRepositories",
                 "traits": {
-                    "smithy.api#documentation": "<p>A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application.</p>"
+                    "smithy.api#documentation": "<p>A list of Git repositories that SageMaker AI automatically displays to users for cloning in the JupyterServer application.</p>"
                 }
             }
         },
@@ -37622,13 +37800,13 @@
             "DefaultResourceSpec": {
                 "target": "com.amazonaws.sagemaker#ResourceSpec",
                 "traits": {
-                    "smithy.api#documentation": "<p>The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.</p> <note> <p>The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the CLI or CloudFormation and the instance type parameter value is not passed.</p> </note>"
+                    "smithy.api#documentation": "<p>The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app.</p> <note> <p>The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the CLI or CloudFormation and the instance type parameter value is not passed.</p> </note>"
                 }
             },
             "CustomImages": {
                 "target": "com.amazonaws.sagemaker#CustomImages",
                 "traits": {
-                    "smithy.api#documentation": "<p>A list of custom SageMaker images that are configured to run as a KernelGateway app.</p>"
+                    "smithy.api#documentation": "<p>A list of custom SageMaker AI images that are configured to run as a KernelGateway app.</p>"
                 }
            },
             "LifecycleConfigArns": {
@@ -37656,12 +37834,12 @@
             "FileSystemConfig": {
                 "target": "com.amazonaws.sagemaker#FileSystemConfig",
                 "traits": {
-                    "smithy.api#documentation": "<p>The Amazon Elastic File System storage configuration for a SageMaker image.</p>"
+                    "smithy.api#documentation": "<p>The Amazon Elastic File System storage configuration for a SageMaker AI image.</p>"
                 }
             }
         },
         "traits": {
-            "smithy.api#documentation": "<p>The configuration for the file system and kernels in a SageMaker image running as a KernelGateway app.</p>"
+            "smithy.api#documentation": "<p>The configuration for the file system and kernels in a SageMaker AI image running as a KernelGateway app.</p>"
             }
         },
         "com.amazonaws.sagemaker#KernelName": {
@@ -38715,7 +38893,7 @@
             "SageMakerImageVersionAliases": {
                 "target": "com.amazonaws.sagemaker#SageMakerImageVersionAliases",
                 "traits": {
-                    "smithy.api#documentation": "<p>A list of SageMaker image version aliases.</p>"
+                    "smithy.api#documentation": "<p>A list of SageMaker AI image version aliases.</p>"

" } }, "NextToken": { @@ -39831,7 +40009,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the response is truncated, Amazon SageMaker returns this NextToken. To retrieve\n the next set of model compilation jobs, use this token in the next request.

" + "smithy.api#documentation": "

If the response is truncated, Amazon SageMaker AI returns this NextToken. To retrieve\n the next set of model compilation jobs, use this token in the next request.

" } } }, @@ -43610,7 +43788,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the response is truncated, Amazon SageMaker returns this token. To retrieve the\n next set of model quality monitoring job definitions, use it in the next request.

" + "smithy.api#documentation": "

If the response is truncated, Amazon SageMaker AI returns this token. To retrieve the\n next set of model quality monitoring job definitions, use it in the next request.

" } } }, @@ -44234,7 +44412,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the response is truncated, SageMaker returns this token. To get the next\n set of lifecycle configurations, use it in the next request.

" + "smithy.api#documentation": "

If the response is truncated, SageMaker AI returns this token. To get the next\n set of lifecycle configurations, use it in the next request.

" } }, "NotebookInstanceLifecycleConfigs": { @@ -44257,7 +44435,7 @@ "target": "com.amazonaws.sagemaker#ListNotebookInstancesOutput" }, "traits": { - "smithy.api#documentation": "

Returns a list of the SageMaker notebook instances in the requester's\n account in an Amazon Web Services Region.

", + "smithy.api#documentation": "

Returns a list of the SageMaker AI notebook instances in the requester's\n account in an Amazon Web Services Region.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -44358,7 +44536,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the response to the previous ListNotebookInstances request was\n truncated, SageMaker returns this token. To retrieve the next set of notebook\n instances, use the token in the next request.

" + "smithy.api#documentation": "

If the response to the previous ListNotebookInstances request was\n truncated, SageMaker AI returns this token. To retrieve the next set of notebook\n instances, use the token in the next request.

" } }, "NotebookInstances": { @@ -45359,7 +45537,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the Amazon SageMaker Studio Lifecycle Configurations in your Amazon Web Services\n Account.

", + "smithy.api#documentation": "

Lists the Amazon SageMaker AI Studio Lifecycle Configurations in your Amazon Web Services\n Account.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -47107,12 +47285,6 @@ "smithy.api#enumValue": "PerformanceEvaluation" } }, - "HYPER_POD_CLUSTERS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "HyperPodClusters" - } - }, "LAKERA_GUARD": { "target": "smithy.api#Unit", "traits": { @@ -47136,6 +47308,12 @@ "traits": { "smithy.api#enumValue": "Fiddler" } + }, + "HYPER_POD_CLUSTERS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HyperPodClusters" + } } } }, @@ -48916,7 +49094,7 @@ "target": "com.amazonaws.sagemaker#ContainerImage", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored.

\n

If you are using your own custom algorithm instead of an algorithm provided by SageMaker,\n the inference code must meet SageMaker requirements. SageMaker supports both\n registry/repository[:tag] and registry/repository[@digest]\n image path formats. For more information, see Using Your Own Algorithms with Amazon\n SageMaker.

", + "smithy.api#documentation": "

The Amazon Elastic Container Registry (Amazon ECR) path where inference code is stored.

\n

If you are using your own custom algorithm instead of an algorithm provided by SageMaker,\n the inference code must meet SageMaker requirements. SageMaker supports both\n registry/repository[:tag] and registry/repository[@digest]\n image path formats. For more information, see Using Your Own Algorithms with Amazon\n SageMaker.

", "smithy.api#required": {} } }, @@ -48979,6 +49157,12 @@ "traits": { "smithy.api#documentation": "

The additional data source that is used during inference in the Docker container for\n your model package.

" } + }, + "ModelDataETag": { + "target": "com.amazonaws.sagemaker#String", + "traits": { + "smithy.api#documentation": "

The ETag associated with Model Data URL.

" + } } }, "traits": { @@ -50198,7 +50382,7 @@ "VolumeKmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Key Management Service (KMS) key that Amazon SageMaker uses to\n encrypt data on the storage volume attached to the ML compute instance(s) that run the\n model monitoring job.

" + "smithy.api#documentation": "

The Key Management Service (KMS) key that Amazon SageMaker AI uses to\n encrypt data on the storage volume attached to the ML compute instance(s) that run the\n model monitoring job.

" } } }, @@ -50471,7 +50655,7 @@ "target": "com.amazonaws.sagemaker#MonitoringInputs", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker Endpoint.

", + "smithy.api#documentation": "

The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker AI Endpoint.

", "smithy.api#required": {} } }, @@ -50521,7 +50705,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can \n assume to perform tasks on your behalf.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can \n assume to perform tasks on your behalf.

", "smithy.api#required": {} } } @@ -50689,7 +50873,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Key Management Service (KMS) key that Amazon SageMaker uses to\n encrypt the model artifacts at rest using Amazon S3 server-side encryption.

" + "smithy.api#documentation": "

The Key Management Service (KMS) key that Amazon SageMaker AI uses to\n encrypt the model artifacts at rest using Amazon S3 server-side encryption.

" } } }, @@ -50762,7 +50946,7 @@ "target": "com.amazonaws.sagemaker#MonitoringS3Uri", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A URI that identifies the Amazon S3 storage location where Amazon SageMaker\n saves the results of a monitoring job.

", + "smithy.api#documentation": "

A URI that identifies the Amazon S3 storage location where Amazon SageMaker AI\n saves the results of a monitoring job.

", "smithy.api#required": {} } }, @@ -50770,7 +50954,7 @@ "target": "com.amazonaws.sagemaker#ProcessingLocalPath", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The local path to the Amazon S3 storage location where Amazon SageMaker\n saves the results of a monitoring job. LocalPath is an absolute path for the output\n data.

", + "smithy.api#documentation": "

The local path to the Amazon S3 storage location where Amazon SageMaker AI\n saves the results of a monitoring job. LocalPath is an absolute path for the output\n data.

", "smithy.api#required": {} } }, @@ -51140,7 +51324,7 @@ } }, "traits": { - "smithy.api#documentation": "

The VpcConfig configuration object that specifies the VPC that you want the\n compilation jobs to connect to. For more information on controlling access to your Amazon S3\n buckets used for compilation job, see Give Amazon SageMaker Compilation Jobs Access to\n Resources in Your Amazon VPC.

" + "smithy.api#documentation": "

The VpcConfig configuration object that specifies the VPC that you want the\n compilation jobs to connect to. For more information on controlling access to your Amazon S3\n buckets used for compilation job, see Give Amazon SageMaker AI Compilation Jobs Access to\n Resources in Your Amazon VPC.

" } }, "com.amazonaws.sagemaker#NeoVpcSecurityGroupId": { @@ -51647,18 +51831,18 @@ "DefaultCodeRepository": { "target": "com.amazonaws.sagemaker#CodeRepositoryNameOrUrl", "traits": { - "smithy.api#documentation": "

The Git repository associated with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

The Git repository associated with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } }, "AdditionalCodeRepositories": { "target": "com.amazonaws.sagemaker#AdditionalCodeRepositoryNamesOrUrls", "traits": { - "smithy.api#documentation": "

An array of up to three Git repositories associated with the notebook instance. These\n can be either the names of Git repositories stored as resources in your account, or the\n URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

An array of up to three Git repositories associated with the notebook instance. These\n can be either the names of Git repositories stored as resources in your account, or the\n URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } } }, "traits": { - "smithy.api#documentation": "

Provides summary information for an SageMaker notebook instance.

" + "smithy.api#documentation": "

Provides summary information for a SageMaker AI notebook instance.

" } }, "com.amazonaws.sagemaker#NotebookInstanceSummaryList": { @@ -52771,7 +52955,7 @@ "target": "com.amazonaws.sagemaker#S3Uri", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For\n example, s3://bucket-name/key-name-prefix.

", + "smithy.api#documentation": "

Identifies the S3 bucket where you want Amazon SageMaker AI to store the model artifacts. For\n example, s3://bucket-name/key-name-prefix.

", "smithy.api#required": {} } }, @@ -52796,7 +52980,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide. The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab; Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab; Alias name: alias/ExampleAlias; Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias.

" + "smithy.api#documentation": "

The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker AI uses to encrypt your output models with Amazon S3 server-side encryption after compilation job. If you don't provide a KMS key ID, Amazon SageMaker AI uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide. The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab; Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab; Alias name: alias/ExampleAlias; Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias.

" } } }, @@ -58099,7 +58283,7 @@ "CustomImages": { "target": "com.amazonaws.sagemaker#CustomImages", "traits": { - "smithy.api#documentation": "

A list of custom SageMaker images that are configured to run as a RSession\n app.

" + "smithy.api#documentation": "

A list of custom SageMaker AI images that are configured to run as an RSession app.

" } } }, @@ -59462,6 +59646,12 @@ "smithy.api#enumValue": "ml.p5en.48xlarge" } }, + "ML_TRN1_32XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.trn1.32xlarge" + } + }, "ML_TRN2_48XLARGE": { "target": "smithy.api#Unit", "traits": { @@ -60006,7 +60196,7 @@ "SageMakerImageArn": { "target": "com.amazonaws.sagemaker#ImageArn", "traits": { - "smithy.api#documentation": "

The ARN of the SageMaker image that the image version belongs to.

" + "smithy.api#documentation": "

The ARN of the SageMaker AI image that the image version belongs to.

" } }, "SageMakerImageVersionArn": { @@ -60035,7 +60225,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that\n the version runs on.

" + "smithy.api#documentation": "

Specifies the ARNs of a SageMaker AI image and SageMaker AI image version, and the instance type that the version runs on.

" } }, "com.amazonaws.sagemaker#ResourceType": { @@ -60446,7 +60636,7 @@ "target": "com.amazonaws.sagemaker#S3DataType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training. If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training. If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile can only be used if the Channel's input mode is Pipe.

", + "smithy.api#documentation": "

If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training. If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training. If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile can only be used if the Channel's input mode is Pipe.

", "smithy.api#required": {} } }, @@ -60548,6 +60738,18 @@ "traits": { "smithy.api#documentation": "

The Amazon S3 URI of the manifest file. The manifest file is a CSV file that stores the\n artifact locations.

" } + }, + "ETag": { + "target": "com.amazonaws.sagemaker#String", + "traits": { + "smithy.api#documentation": "

The ETag associated with S3 URI.

" + } + }, + "ManifestEtag": { + "target": "com.amazonaws.sagemaker#String", + "traits": { + "smithy.api#documentation": "

The ETag associated with Manifest S3 URI.

" + } } }, "traits": { @@ -62919,7 +63121,7 @@ "target": "com.amazonaws.sagemaker#ScheduleExpression", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A cron expression that describes details about the monitoring schedule. The supported cron expressions are: hourly: cron(0 * ? * * *); daily: cron(0 [00-23] ? * * *); to run the job one time, immediately, use the keyword NOW. For example, the following are valid cron expressions: Daily at noon UTC: cron(0 12 ? * * *); Daily at midnight UTC: cron(0 0 ? * * *). To support running every 6, 12 hours, the following are also supported: cron(0 [00-23]/[01-24] ? * * *). For example: Every 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *); Every two hours starting at midnight: cron(0 0/2 ? * * *). Note: Even though the cron expression is set to start at 5PM UTC, there could be a delay of 0-20 minutes from the actual requested time to run the execution. We recommend that if you would like a daily schedule, you do not provide this parameter. Amazon SageMaker will pick a time for running every day. You can also specify the keyword NOW to run the monitoring job immediately, one time, without recurring.

", + "smithy.api#documentation": "

A cron expression that describes details about the monitoring schedule. The supported cron expressions are: hourly: cron(0 * ? * * *); daily: cron(0 [00-23] ? * * *); to run the job one time, immediately, use the keyword NOW. For example, the following are valid cron expressions: Daily at noon UTC: cron(0 12 ? * * *); Daily at midnight UTC: cron(0 0 ? * * *). To support running every 6, 12 hours, the following are also supported: cron(0 [00-23]/[01-24] ? * * *). For example: Every 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *); Every two hours starting at midnight: cron(0 0/2 ? * * *). Note: Even though the cron expression is set to start at 5PM UTC, there could be a delay of 0-20 minutes from the actual requested time to run the execution. We recommend that if you would like a daily schedule, you do not provide this parameter. Amazon SageMaker AI will pick a time for running every day. You can also specify the keyword NOW to run the monitoring job immediately, one time, without recurring.

", "smithy.api#required": {} } }, @@ -64005,7 +64207,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies options for sharing Amazon SageMaker Studio notebooks. These settings are\n specified as part of DefaultUserSettings when the CreateDomain API\n is called, and as part of UserSettings when the CreateUserProfile\n API is called. When SharingSettings is not specified, notebook sharing isn't\n allowed.

" + "smithy.api#documentation": "

Specifies options for sharing Amazon SageMaker AI Studio notebooks. These settings are\n specified as part of DefaultUserSettings when the CreateDomain API\n is called, and as part of UserSettings when the CreateUserProfile\n API is called. When SharingSettings is not specified, notebook sharing isn't\n allowed.

" } }, "com.amazonaws.sagemaker#SharingType": { @@ -64415,6 +64617,12 @@ "smithy.api#documentation": "

Specifies the location of ML model data to deploy during endpoint creation.

" } }, + "ModelDataETag": { + "target": "com.amazonaws.sagemaker#String", + "traits": { + "smithy.api#documentation": "

The ETag associated with Model Data URL.

" + } + }, "AlgorithmName": { "target": "com.amazonaws.sagemaker#ArnOrName", "traits": { @@ -64680,7 +64888,7 @@ "AppType": { "target": "com.amazonaws.sagemaker#AppType", "traits": { - "smithy.api#documentation": "

The type of app created within the space.

" + "smithy.api#documentation": "

The type of app created within the space. Note: If using the UpdateSpace API, you can't change the app type of your space by specifying a different value for this field.

" } }, "SpaceStorageSettings": { @@ -64692,7 +64900,7 @@ "CustomFileSystems": { "target": "com.amazonaws.sagemaker#CustomFileSystems", "traits": { - "smithy.api#documentation": "

A file system, created by you, that you assign to a space for an Amazon SageMaker\n Domain. Permitted users can access this file system in Amazon SageMaker Studio.

" + "smithy.api#documentation": "

A file system, created by you, that you assign to a space for an Amazon SageMaker AI\n Domain. Permitted users can access this file system in Amazon SageMaker AI Studio.

" } } }, @@ -65139,7 +65347,7 @@ } ], "traits": { - "smithy.api#documentation": "

Launches an ML compute instance with the latest version of the libraries and\n attaches your ML storage volume. After configuring the notebook instance, SageMaker sets the notebook instance status to InService. A notebook\n instance's status must be InService before you can connect to your Jupyter\n notebook.

" + "smithy.api#documentation": "

Launches an ML compute instance with the latest version of the libraries and\n attaches your ML storage volume. After configuring the notebook instance, SageMaker AI sets the notebook instance status to InService. A notebook\n instance's status must be InService before you can connect to your Jupyter\n notebook.

" } }, "com.amazonaws.sagemaker#StartNotebookInstanceInput": { @@ -65416,7 +65624,7 @@ } ], "traits": { - "smithy.api#documentation": "

Stops a model compilation job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal. When it receives a StopCompilationJob request, Amazon SageMaker changes the CompilationJobStatus of the job to Stopping. After Amazon SageMaker stops the job, it sets the CompilationJobStatus to Stopped.

" + "smithy.api#documentation": "

Stops a model compilation job. To stop a job, Amazon SageMaker AI sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal. When it receives a StopCompilationJob request, Amazon SageMaker AI changes the CompilationJobStatus of the job to Stopping. After Amazon SageMaker stops the job, it sets the CompilationJobStatus to Stopped.

" } }, "com.amazonaws.sagemaker#StopCompilationJobRequest": { @@ -65768,7 +65976,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Terminates the ML compute instance. Before terminating the instance, SageMaker disconnects the ML storage volume from it. SageMaker preserves the ML storage volume. SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance. To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.

" + "smithy.api#documentation": "

Terminates the ML compute instance. Before terminating the instance, SageMaker AI disconnects the ML storage volume from it. SageMaker AI preserves the ML storage volume. SageMaker AI stops charging you for the ML compute instance when you call StopNotebookInstance. To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.

" } }, "com.amazonaws.sagemaker#StopNotebookInstanceInput": { @@ -66177,19 +66385,19 @@ "StudioLifecycleConfigName": { "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", "traits": { - "smithy.api#documentation": "

The name of the Amazon SageMaker Studio Lifecycle Configuration.

" + "smithy.api#documentation": "

The name of the Amazon SageMaker AI Studio Lifecycle Configuration.

" } }, "CreationTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

The creation time of the Amazon SageMaker Studio Lifecycle Configuration.

" + "smithy.api#documentation": "

The creation time of the Amazon SageMaker AI Studio Lifecycle Configuration.

" } }, "LastModifiedTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle\n Configurations are immutable.

" + "smithy.api#documentation": "

This value is equivalent to CreationTime because Amazon SageMaker AI Studio Lifecycle\n Configurations are immutable.

" } }, "StudioLifecycleConfigAppType": { @@ -66200,7 +66408,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details of the Amazon SageMaker Studio Lifecycle Configuration.

" + "smithy.api#documentation": "

Details of the Amazon SageMaker AI Studio Lifecycle Configuration.

" } }, "com.amazonaws.sagemaker#StudioLifecycleConfigName": { @@ -67061,7 +67269,7 @@ "DefaultResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the SageMaker\n image created on the instance.

" + "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the SageMaker AI\n image created on the instance.

" } } }, @@ -70282,6 +70490,18 @@ "smithy.api#enumValue": "ml.g5.48xlarge" } }, + "ML_TRN1_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.trn1.2xlarge" + } + }, + "ML_TRN1_32XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.trn1.32xlarge" + } + }, "ML_INF2_XLARGE": { "target": "smithy.api#Unit", "traits": { @@ -72434,7 +72654,7 @@ "AppNetworkAccessType": { "target": "com.amazonaws.sagemaker#AppNetworkAccessType", "traits": { - "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access. VpcOnly - All Studio traffic is through the specified VPC and subnets. This configuration can only be modified if there are no apps in the InService, Pending, or Deleting state. The configuration cannot be updated if DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided as part of the same request.

" + "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access. VpcOnly - All Studio traffic is through the specified VPC and subnets. This configuration can only be modified if there are no apps in the InService, Pending, or Deleting state. The configuration cannot be updated if DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided as part of the same request.

" } }, "TagPropagation": { @@ -72872,7 +73092,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the properties of a SageMaker image. To change the image's tags, use the\n AddTags and DeleteTags APIs.

" + "smithy.api#documentation": "

Updates the properties of a SageMaker AI image. To change the image's tags, use the\n AddTags and DeleteTags APIs.

" } }, "com.amazonaws.sagemaker#UpdateImageRequest": { @@ -72907,7 +73127,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "

The new ARN for the IAM role that enables Amazon SageMaker to perform tasks on your behalf.

" + "smithy.api#documentation": "

The new ARN for the IAM role that enables Amazon SageMaker AI to perform tasks on your behalf.

" } } }, @@ -72946,7 +73166,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the properties of a SageMaker image version.

" + "smithy.api#documentation": "

Updates the properties of a SageMaker AI image version.

" } }, "com.amazonaws.sagemaker#UpdateImageVersionRequest": { @@ -72993,7 +73213,7 @@ "JobType": { "target": "com.amazonaws.sagemaker#JobType", "traits": { - "smithy.api#documentation": "

Indicates SageMaker job type compatibility. TRAINING: The image version is compatible with SageMaker training jobs. INFERENCE: The image version is compatible with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels.

" + "smithy.api#documentation": "

Indicates SageMaker AI job type compatibility. TRAINING: The image version is compatible with SageMaker AI training jobs. INFERENCE: The image version is compatible with SageMaker AI inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels.

" } }, "MLFramework": { @@ -73677,7 +73897,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access the notebook instance. For more information, see SageMaker Roles. Note: To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role that SageMaker AI can assume to access the notebook instance. For more information, see SageMaker AI Roles. Note: To be able to pass this role to SageMaker AI, the caller of this API must have the iam:PassRole permission.

" } }, "LifecycleConfigName": { @@ -73695,19 +73915,19 @@ "VolumeSizeInGB": { "target": "com.amazonaws.sagemaker#NotebookInstanceVolumeSizeInGB", "traits": { - "smithy.api#documentation": "

The size, in GB, of the ML storage volume to attach to the notebook instance. The\n default value is 5 GB. ML storage volumes are encrypted, so SageMaker can't\n determine the amount of available free space on the volume. Because of this, you can\n increase the volume size when you update a notebook instance, but you can't decrease the\n volume size. If you want to decrease the size of the ML storage volume in use, create a\n new notebook instance with the desired size.

" + "smithy.api#documentation": "

The size, in GB, of the ML storage volume to attach to the notebook instance. The\n default value is 5 GB. ML storage volumes are encrypted, so SageMaker AI can't\n determine the amount of available free space on the volume. Because of this, you can\n increase the volume size when you update a notebook instance, but you can't decrease the\n volume size. If you want to decrease the size of the ML storage volume in use, create a\n new notebook instance with the desired size.

" } }, "DefaultCodeRepository": { "target": "com.amazonaws.sagemaker#CodeRepositoryNameOrUrl", "traits": { - "smithy.api#documentation": "

The Git repository to associate with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

The Git repository to associate with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } }, "AdditionalCodeRepositories": { "target": "com.amazonaws.sagemaker#AdditionalCodeRepositoryNamesOrUrls", "traits": { - "smithy.api#documentation": "

An array of up to three Git repositories to associate with the notebook instance.\n These can be either the names of Git repositories stored as resources in your account,\n or the URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

An array of up to three Git repositories to associate with the notebook instance.\n These can be either the names of Git repositories stored as resources in your account,\n or the URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } }, "AcceleratorTypes": { @@ -74138,7 +74358,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the settings of a space.

" + "smithy.api#documentation": "

Updates the settings of a space. Note: You can't edit the app type of a space in the SpaceSettings.

" } }, "com.amazonaws.sagemaker#UpdateSpaceRequest": { @@ -74822,13 +75042,13 @@ "SecurityGroups": { "target": "com.amazonaws.sagemaker#SecurityGroupIds", "traits": { - "smithy.api#documentation": "

The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication. Optional when the CreateDomain.AppNetworkAccessType parameter is set to PublicInternetOnly. Required when the CreateDomain.AppNetworkAccessType parameter is set to VpcOnly, unless specified as part of the DefaultUserSettings for the domain. Amazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" + "smithy.api#documentation": "

The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication. Optional when the CreateDomain.AppNetworkAccessType parameter is set to PublicInternetOnly. Required when the CreateDomain.AppNetworkAccessType parameter is set to VpcOnly, unless specified as part of the DefaultUserSettings for the domain. Amazon SageMaker AI adds a security group to allow NFS traffic from Amazon SageMaker AI Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" } }, "SharingSettings": { "target": "com.amazonaws.sagemaker#SharingSettings", "traits": { - "smithy.api#documentation": "

Specifies options for sharing Amazon SageMaker Studio notebooks.

" + "smithy.api#documentation": "

Specifies options for sharing Amazon SageMaker AI Studio notebooks.

" } }, "JupyterServerAppSettings": { @@ -74906,7 +75126,7 @@ "CustomFileSystemConfigs": { "target": "com.amazonaws.sagemaker#CustomFileSystemConfigs", "traits": { - "smithy.api#documentation": "

The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" + "smithy.api#documentation": "

The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker AI Studio. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" } }, "StudioWebPortalSettings": { diff --git a/models/security-ir.json b/models/security-ir.json index b52431e028..368387ee41 100644 --- a/models/security-ir.json +++ b/models/security-ir.json @@ -4983,7 +4983,7 @@ "type": "string", "traits": { "smithy.api#length": { - "min": 0, + "min": 1, "max": 500 } } diff --git a/models/securityhub.json b/models/securityhub.json index 86c6b6c2b4..bb61de1e91 100644 --- a/models/securityhub.json +++ b/models/securityhub.json @@ -434,7 +434,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Information about the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" + "smithy.api#documentation": "

\n Information about the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" } }, "com.amazonaws.securityhub#ActorSession": { @@ -466,7 +466,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about the authenticated session used by the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" + "smithy.api#documentation": "

\n Contains information about the authenticated session used by the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" } }, "com.amazonaws.securityhub#ActorSessionMfaStatus": { @@ -521,7 +521,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about the credentials used by the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" + "smithy.api#documentation": "

\n Contains information about the credentials used by the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" } }, "com.amazonaws.securityhub#ActorsList": { @@ -890,13 +890,13 @@ "CreatedAt": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the rule was created. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59).

" + "smithy.api#documentation": "

A timestamp that indicates when the rule was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "UpdatedAt": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the rule was most recently updated. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59).

" + "smithy.api#documentation": "

A timestamp that indicates when the rule was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "CreatedBy": { @@ -1005,25 +1005,25 @@ "FirstObservedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59). Array Members: Minimum number of 1 item. Maximum number of 20 items.

" + "smithy.api#documentation": "

A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items.

" } }, "LastObservedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59). Array Members: Minimum number of 1 item. Maximum number of 20 items.

" + "smithy.api#documentation": "

A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items.

" } }, "CreatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when this finding record was created. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59). Array Members: Minimum number of 1 item. Maximum number of 20 items.

" + "smithy.api#documentation": "

A timestamp that indicates when this finding record was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items.

" } }, "UpdatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the finding record was most recently updated. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59). Array Members: Minimum number of 1 item. Maximum number of 20 items.

" + "smithy.api#documentation": "

A timestamp that indicates when the finding record was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items.

" } }, "Confidence": { @@ -1167,7 +1167,7 @@ "NoteUpdatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

The timestamp of when the note was updated. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59). Array Members: Minimum number of 1 item. Maximum number of 20 items.

" + "smithy.api#documentation": "

The timestamp of when the note was updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items.

" } }, "NoteUpdatedBy": { @@ -1247,13 +1247,13 @@ "CreatedAt": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the rule was created. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59).

" + "smithy.api#documentation": "

A timestamp that indicates when the rule was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "UpdatedAt": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the rule was most recently updated. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59).

" + "smithy.api#documentation": "

A timestamp that indicates when the rule was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "CreatedBy": { @@ -1655,13 +1655,13 @@ "FirstSeen": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the API call was first observed. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59).

" + "smithy.api#documentation": "

A timestamp that indicates when the API call was first observed. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastSeen": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the API call was most recently observed. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59).

" + "smithy.api#documentation": "

A timestamp that indicates when the API call was most recently observed. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -1859,7 +1859,7 @@ "CreatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the API was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the API was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Version": { @@ -1981,13 +1981,13 @@ "CreatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the stage was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the stage was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastUpdatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the stage was most recently updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the stage was most recently updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "WebAclArn": { @@ -2025,7 +2025,7 @@ "CreatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the API was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the API was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Description": { @@ -2119,7 +2119,7 @@ "CreatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the stage was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the stage was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Description": { @@ -2143,7 +2143,7 @@ "LastUpdatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the stage was most recently updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the stage was most recently updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "RouteSettings": { @@ -2557,7 +2557,7 @@ "CreatedTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the auto scaling group was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the auto scaling group was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "MixedInstancesPolicy": { @@ -2863,7 +2863,7 @@ "CreatedTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The creation date and time for the launch configuration.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The creation date and time for the launch configuration.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "EbsOptimized": { @@ -3457,7 +3457,7 @@ "CreatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the certificate was requested.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the certificate was requested.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DomainName": { @@ -3487,7 +3487,7 @@ "ImportedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the certificate was imported. Provided if the certificate type is\n IMPORTED.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the certificate was imported. Provided if the certificate type is\n IMPORTED.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "InUseBy": { @@ -3499,7 +3499,7 @@ "IssuedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the certificate was issued. Provided if the certificate type is\n AMAZON_ISSUED.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the certificate was issued. Provided if the certificate type is\n AMAZON_ISSUED.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Issuer": { @@ -3523,13 +3523,13 @@ "NotAfter": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The time after which the certificate becomes invalid.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The time after which the certificate becomes invalid.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "NotBefore": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The time before which the certificate is not valid.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The time before which the certificate is not valid.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Options": { @@ -3725,7 +3725,7 @@ "UpdatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the renewal summary was last updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the renewal summary was last updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -3987,7 +3987,7 @@ "LastModifiedTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when that the distribution was last modified.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when that the distribution was last modified.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Logging": { @@ -5311,7 +5311,7 @@ "LastUpdateToPayPerRequestDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

If the billing mode is PAY_PER_REQUEST, indicates when the billing mode was\n set to that value.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

If the billing mode is PAY_PER_REQUEST, indicates when the billing mode was\n set to that value.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -5337,7 +5337,7 @@ "CreationDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the table was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the table was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "GlobalSecondaryIndexes": { @@ -5605,13 +5605,13 @@ "LastDecreaseDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the provisioned throughput was last decreased.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the provisioned throughput was last decreased.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastIncreaseDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the provisioned throughput was last increased.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the provisioned throughput was last increased.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "NumberOfDecreasesToday": { @@ -5745,7 +5745,7 @@ "RestoreDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates the point in time that the table was restored to.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates the point in time that the table was restored to.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "RestoreInProgress": { @@ -5765,7 +5765,7 @@ "InaccessibleEncryptionDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

If the key is inaccessible, the date and time when DynamoDB detected that the key was\n inaccessible.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

If the key is inaccessible, the date and time when DynamoDB detected that the key was\n inaccessible.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Status": { @@ -6215,7 +6215,7 @@ "LaunchedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the instance was launched.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the instance was launched.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "NetworkInterfaces": { @@ -7685,7 +7685,7 @@ "AttachTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the attachment initiated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the attachment initiated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "AttachmentId": { @@ -8321,7 +8321,7 @@ "CreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the volume was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the volume was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DeviceName": { @@ -8871,7 +8871,7 @@ "LastStatusChange": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The date and time of the last change in status.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The date and time of the last change in status.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "OutsideIpAddress": { @@ -8939,7 +8939,7 @@ "ImagePublishedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The date and time when the image was pushed to the repository.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The date and time when the image was pushed to the repository.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -11948,7 +11948,7 @@ "CreatedTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the load balancer was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the load balancer was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DnsName": { @@ -12240,7 +12240,7 @@ "CreatedTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the load balancer was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the load balancer was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DNSName": { @@ -12785,7 +12785,7 @@ "CreatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the IAM access key was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the IAM access key was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "PrincipalId": { @@ -12861,7 +12861,7 @@ "CreationDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the session was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the session was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -12962,7 +12962,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the IAM group was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the IAM group was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "GroupId": { @@ -13026,7 +13026,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the instance profile was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the instance profile was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "InstanceProfileId": { @@ -13082,7 +13082,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the role was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the role was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Path": { @@ -13146,7 +13146,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

When the policy was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

When the policy was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DefaultVersionId": { @@ -13200,7 +13200,7 @@ "UpdateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

When the policy was most recently updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

When the policy was most recently updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -13226,7 +13226,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the version was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the version was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -13268,7 +13268,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the role was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the role was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "InstanceProfileList": { @@ -13347,7 +13347,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the user was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the user was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "GroupList": { @@ -13481,7 +13481,7 @@ "CreationDate": { "target": "com.amazonaws.securityhub#Double", "traits": { - "smithy.api#documentation": "

Indicates when the KMS key was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the KMS key was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
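Note that this CreationDate targets com.amazonaws.securityhub#Double rather than a string: KMS key creation time is carried as seconds since the Unix epoch, not as one of the formatted timestamps above. A minimal Swift sketch of the conversion, assuming the value has already been decoded as a Double (the sample value is illustrative):

import Foundation

// Illustrative only: a KMS CreationDate arrives as epoch seconds.
let creationDate: Double = 1_548_975_600.123
let asDate = Date(timeIntervalSince1970: creationDate)
print(asDate)  // 2019-01-31 23:00:00 +0000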

" } }, "KeyId": { @@ -13619,7 +13619,7 @@ "LastModified": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the function was last updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the function was last updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Layers": { @@ -13823,7 +13823,7 @@ "CreatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the version was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the version was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -14846,7 +14846,7 @@ "ClusterCreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the DB cluster was created, in Universal Coordinated Time (UTC).

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the DB cluster was created, in Universal Coordinated Time (UTC).

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "EnabledCloudWatchLogsExports": { @@ -15046,7 +15046,7 @@ "SnapshotCreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the snapshot was taken.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the snapshot was taken.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Engine": { @@ -15082,7 +15082,7 @@ "ClusterCreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the DB cluster was created, in Universal Coordinated Time (UTC).

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the DB cluster was created, in Universal Coordinated Time (UTC).

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "MasterUsername": { @@ -15310,7 +15310,7 @@ "InstanceCreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the DB instance was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the DB instance was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "KmsKeyId": { @@ -15424,7 +15424,7 @@ "LatestRestorableTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Specifies the latest time to which a database can be restored with point-in-time\n restore.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Specifies the latest time to which a database can be restored with point-in-time\n restore.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "AutoMinorVersionUpgrade": { @@ -16273,7 +16273,7 @@ "SubscriptionCreationTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The datetime when the event notification subscription was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The datetime when the event notification subscription was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -16461,7 +16461,7 @@ "DeferMaintenanceEndTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The end of the time window for which maintenance was deferred.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The end of the time window for which maintenance was deferred.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DeferMaintenanceIdentifier": { @@ -16473,7 +16473,7 @@ "DeferMaintenanceStartTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The start of the time window for which maintenance was deferred.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The start of the time window for which maintenance was deferred.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -16517,7 +16517,7 @@ "ClusterCreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the cluster was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the cluster was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ClusterIdentifier": { @@ -16625,7 +16625,7 @@ "ExpectedNextSnapshotScheduleTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the next snapshot is expected to be taken. The cluster must have a valid\n snapshot schedule and have backups enabled.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the next snapshot is expected to be taken. The cluster must have a valid\n snapshot schedule and have backups enabled.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ExpectedNextSnapshotScheduleTimeStatus": { @@ -16673,7 +16673,7 @@ "NextMaintenanceWindowStartTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates the start of the next maintenance window.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates the start of the next maintenance window.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "NodeType": { @@ -16869,13 +16869,13 @@ "LastFailureTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The last time when logs failed to be delivered.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The last time when logs failed to be delivered.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastSuccessfulDeliveryTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The last time that logs were delivered successfully.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The last time that logs were delivered successfully.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LoggingEnabled": { @@ -17316,7 +17316,7 @@ "ExpirationDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The date when objects are moved or deleted.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The date when objects are moved or deleted.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ExpirationInDays": { @@ -17534,7 +17534,7 @@ "Date": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A date on which to transition objects to the specified storage class. If you provide Date, you cannot provide Days.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A date on which to transition objects to the specified storage class. If you provide Date, you cannot provide Days.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Days": { @@ -17604,7 +17604,7 @@ "CreatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the S3 bucket was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the S3 bucket was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ServerSideEncryptionConfiguration": { @@ -18077,7 +18077,7 @@ "LastModified": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the object was last modified.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the object was last modified.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ETag": { @@ -18127,19 +18127,19 @@ "AdditionalCodeRepositories": { "target": "com.amazonaws.securityhub#NonEmptyStringList", "traits": { - "smithy.api#documentation": "

\n An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in CodeCommit or in any other Git repository. \n These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git repositories with SageMaker notebook instances in the Amazon SageMaker Developer Guide.\n

" + "smithy.api#documentation": "

\n An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in CodeCommit or in any other Git repository. \n These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git repositories with SageMaker AI notebook instances in the Amazon SageMaker AI Developer Guide.\n

" } }, "DefaultCodeRepository": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in CodeCommit or in any other Git repository. \n When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git repositories with SageMaker notebook instances in the Amazon SageMaker Developer Guide.\n

" + "smithy.api#documentation": "

\n The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in CodeCommit or in any other Git repository. \n When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git repositories with SageMaker AI notebook instances in the Amazon SageMaker AI Developer Guide.\n

" } }, "DirectInternetAccess": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n Sets whether SageMaker provides internet access to the notebook instance. If you set this to Disabled, this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a Network Address Translation (NAT) Gateway in your VPC.\n

" + "smithy.api#documentation": "

\n Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to Disabled, this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker AI training and endpoint services unless you configure a Network Address Translation (NAT) Gateway in your VPC.\n

" } }, "FailureReason": { @@ -18163,13 +18163,13 @@ "KmsKeyId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of an Key Management Service (KMS) key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see \n Enabling and disabling keys in the Key Management Service Developer Guide.\n

" + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of an Key Management Service (KMS) key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see \n Enabling and disabling keys in the Key Management Service Developer Guide.\n

" } }, "NetworkInterfaceId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n The network interface ID that SageMaker created when the instance was created.\n

" + "smithy.api#documentation": "

\n The network interface ID that SageMaker AI created when the instance was created.\n

" } }, "NotebookInstanceArn": { @@ -18240,7 +18240,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Provides details about an Amazon SageMaker notebook instance.\n

" + "smithy.api#documentation": "

\n Provides details about an Amazon SageMaker AI notebook instance.\n

" } }, "com.amazonaws.securityhub#AwsSageMakerNotebookInstanceMetadataServiceConfigurationDetails": { @@ -18397,20 +18397,20 @@ "FirstObservedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the security findings provider first observed the potential security\n issue that a finding captured.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the security findings provider first observed the potential security\n issue that a finding captured.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastObservedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the security findings provider most recently observed the potential\n security issue that a finding captured.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the security findings provider most recently observed a change in the resource that is involved in the finding.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "CreatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Indicates when the security findings provider created the potential security issue that\n a finding captured.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
", + "smithy.api#documentation": "

Indicates when the security findings provider created the potential security issue that\n a finding captured.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

", "smithy.api#required": {} } }, @@ -18418,7 +18418,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Indicates when the security findings provider last updated the finding record.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
", + "smithy.api#documentation": "

Indicates when the security findings provider last updated the finding record.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

", "smithy.api#required": {} } }, @@ -18605,7 +18605,7 @@ "ProcessedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when Security Hub received a finding and begins to process it.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when Security Hub received a finding and begins to process it.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "AwsAccountName": { @@ -18617,7 +18617,7 @@ "Detection": { "target": "com.amazonaws.securityhub#Detection", "traits": { - "smithy.api#documentation": "

\n Provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" + "smithy.api#documentation": "

\n Provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" } } }, @@ -18667,25 +18667,25 @@ "FirstObservedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the security findings provider first\n observed the potential security issue that a finding captured.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when the security findings provider first\n observed the potential security issue that a finding captured.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastObservedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the security findings provider most\n recently observed the potential security issue that a finding captured.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "CreatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the security findings provider\n created the potential security issue that a finding reflects.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when the security findings provider\n created the potential security issue that a finding reflects.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "UpdatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the security findings provider last\n updated the finding record.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when the security findings provider last\n updated the finding record.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "SeverityProduct": { @@ -18889,13 +18889,13 @@ "ProcessLaunchedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that identifies when the process was launched.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that identifies when the process was launched.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ProcessTerminatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that identifies when the process was terminated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that identifies when the process was terminated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ThreatIntelIndicatorType": { @@ -18919,7 +18919,7 @@ "ThreatIntelIndicatorLastObservedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that identifies the last observation of a threat intelligence indicator.

" + "smithy.api#documentation": "

A timestamp that identifies the last observation of a threat intelligence indicator.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ThreatIntelIndicatorSource": { @@ -19084,7 +19084,7 @@ "ResourceContainerLaunchedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that identifies when the container was started.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that identifies when the container was started.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ResourceDetailsOther": { @@ -22636,7 +22636,7 @@ "LaunchedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the container started.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the container started.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "VolumeMounts": { @@ -23628,13 +23628,13 @@ "Start": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A timestamp that provides the start date for the date filter.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that provides the start date for the date filter.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "End": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A timestamp that provides the end date for the date filter.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that provides the end date for the date filter.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
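(Editor's note: Start and End above are plain strings, so producing an accepted format is the caller's job. A hedged sketch of driving this filter from Soto follows; the SecurityHub.DateFilter and AwsSecurityFindingFilters member names are assumed from Soto's usual lower-camel-case code-generation pattern and are not confirmed by this diff.)

```swift
import SotoSecurityHub

// Filter findings updated in January 2024, using the plain
// YYYY-MM-DDTHH:MM:SSZ form the documentation accepts.
// Shape and member names are assumptions based on Soto's codegen conventions.
let filters = SecurityHub.AwsSecurityFindingFilters(
    updatedAt: [
        SecurityHub.DateFilter(
            end: "2024-01-31T23:59:59Z",
            start: "2024-01-01T00:00:00Z"
        )
    ]
)
```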

" } }, "DateRange": { @@ -24399,7 +24399,7 @@ "AutoEnableControls": { "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "

Whether to automatically enable new controls when they are added to standards that are\n enabled.

\n

If set to true, then new controls for enabled standards are enabled\n automatically. If set to false, then new controls are not enabled.

" + "smithy.api#documentation": "

Whether to automatically enable new controls when they are added to standards that are\n enabled.

\n

If set to true, then new controls for enabled standards are enabled\n automatically. If set to false, then new controls are not enabled.

\n

When you automatically enable new controls, you can interact with the controls in \n the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of \n DISABLED. It can take up to several days for Security Hub to process the control release and designate the \n control as ENABLED in your account. During the processing period, you can manually enable or disable a \n control, and Security Hub will maintain that designation regardless of whether you have AutoEnableControls set to \n true.
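(Editor's note: the paragraph added above documents a processing window in which an auto-enabled control can briefly report DISABLED. A hedged sketch of toggling the flag from Soto; the method and request-shape names follow Soto's code-generation pattern and are assumptions, not confirmed by this diff.)

```swift
import SotoCore
import SotoSecurityHub

// `client` is an existing AWSClient in scope; the region is illustrative.
let securityHub = SecurityHub(client: client, region: .useast1)

// Opt in to auto-enabling newly released controls. Per the documentation
// above, a just-released control may report DISABLED for up to several days
// while Security Hub processes the release.
_ = try await securityHub.updateSecurityHubConfiguration(
    .init(autoEnableControls: true)
)
```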

" } }, "ControlFindingGenerator": { @@ -24826,7 +24826,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n A top-level object field that provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" + "smithy.api#documentation": "

\n A top-level object field that provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" } }, "com.amazonaws.securityhub#DisableImportFindingsForProduct": { @@ -25608,7 +25608,7 @@ "UpdateTime": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when Security Hub \n processed the updated finding record.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when Security Hub \n processed the updated finding record.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "FindingCreated": { @@ -26508,13 +26508,13 @@ "StartTime": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

A timestamp that indicates the start time of the requested finding history.

\n

If you provide values for both StartTime and EndTime,\n Security Hub returns finding history for the specified time period. If you\n provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at\n which the API is called. If you provide a value for EndTime but not for\n StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you\n provide neither StartTime nor EndTime, Security Hub\n returns finding history from the CreatedAt timestamp of the finding to the time at which\n the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is \n limited to 90 days.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates the start time of the requested finding history.

\n

If you provide values for both StartTime and EndTime,\n Security Hub returns finding history for the specified time period. If you\n provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at\n which the API is called. If you provide a value for EndTime but not for\n StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you\n provide neither StartTime nor EndTime, Security Hub\n returns finding history from the CreatedAt timestamp of the finding to the time at which\n the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is \n limited to 90 days.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "EndTime": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

\n An ISO 8601-formatted timestamp that indicates the end time of the requested finding history.

\n

If you provide values for both StartTime and EndTime,\n Security Hub returns finding history for the specified time period. If you\n provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at\n which the API is called. If you provide a value for EndTime but not for\n StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you\n provide neither StartTime nor EndTime, Security Hub\n returns finding history from the CreatedAt timestamp of the finding to the time at which\n the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is \n limited to 90 days.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

\n An ISO 8601-formatted timestamp that indicates the end time of the requested finding history.

\n

If you provide values for both StartTime and EndTime,\n Security Hub returns finding history for the specified time period. If you\n provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at\n which the API is called. If you provide a value for EndTime but not for\n StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you\n provide neither StartTime nor EndTime, Security Hub\n returns finding history from the CreatedAt timestamp of the finding to the time at which\n the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is \n limited to 90 days.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.
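(Editor's note: because StartTime and EndTime target the smithy Timestamp type, Soto surfaces them as Foundation Date values and handles serialization. A hedged sketch of the defaulting rules described above; the method and shape names follow Soto's code-generation pattern and are assumptions, not confirmed by this diff.)

```swift
import Foundation
import SotoSecurityHub

// Request the last 30 days of history for one finding; the window must stay
// within the documented 90-day / 100-result limits.
let end = Date()
let start = end.addingTimeInterval(-30 * 24 * 60 * 60)
let history = try await securityHub.getFindingHistory(
    .init(
        endTime: end,
        findingIdentifier: .init(
            id: "example-finding-id",          // hypothetical placeholder
            productArn: "example-product-arn"  // hypothetical placeholder
        ),
        startTime: start
    )
)
```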

" } }, "NextToken": { @@ -29317,7 +29317,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about the Autonomous System (AS) of the network \n endpoints involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" + "smithy.api#documentation": "

\n Contains information about the Autonomous System (AS) of the network \n endpoints involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" } }, "com.amazonaws.securityhub#NetworkConnection": { @@ -29331,7 +29331,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about the network connection involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" + "smithy.api#documentation": "

\n Contains information about the network connection involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" } }, "com.amazonaws.securityhub#NetworkConnectionAction": { @@ -29442,7 +29442,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about network endpoints involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

\n

This field can provide information about the network endpoints associated with the resource in the attack sequence finding, \nor about a specific network endpoint used for the attack.

" + "smithy.api#documentation": "

\n Contains information about network endpoints involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

\n

This field can provide information about the network endpoints associated with the resource in the attack sequence finding, \nor about a specific network endpoint used for the attack.

" } }, "com.amazonaws.securityhub#NetworkEndpointsList": { @@ -29486,7 +29486,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about the location of a network endpoint involved in an Amazon GuardDuty Extended Threat Detection attack sequence. \nGuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" + "smithy.api#documentation": "

\n Contains information about the location of a network endpoint involved in an Amazon GuardDuty Extended Threat Detection attack sequence. \nGuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" } }, "com.amazonaws.securityhub#NetworkHeader": { @@ -29611,7 +29611,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A timestamp that indicates when the note was updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
", + "smithy.api#documentation": "

A timestamp that indicates when the note was updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

", "smithy.api#required": {} } } @@ -30034,13 +30034,13 @@ "OperationStartTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

@@ -30034,13 +30034,13 @@
            "OperationStartTime": {
                "target": "com.amazonaws.securityhub#NonEmptyString",
                "traits": {
-                    "smithy.api#documentation": "Indicates when the operation started. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)."
+                    "smithy.api#documentation": "Indicates when the operation started. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps."
                }
            },
            "OperationEndTime": {
                "target": "com.amazonaws.securityhub#NonEmptyString",
                "traits": {
-                    "smithy.api#documentation": "Indicates when the operation completed. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)."
+                    "smithy.api#documentation": "Indicates when the operation completed. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps."
                }
            },
            "RebootOption": {

@@ -30202,13 +30202,13 @@
            "LaunchedAt": {
                "target": "com.amazonaws.securityhub#NonEmptyString",
                "traits": {
-                    "smithy.api#documentation": "Indicates when the process was launched. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)."
+                    "smithy.api#documentation": "Indicates when the process was launched. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps."
                }
            },
            "TerminatedAt": {
                "target": "com.amazonaws.securityhub#NonEmptyString",
                "traits": {
-                    "smithy.api#documentation": "Indicates when the process was terminated. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)."
+                    "smithy.api#documentation": "Indicates when the process was terminated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps."
                }
            }
        },

@@ -33480,7 +33480,7 @@
            }
        },
        "traits": {
-            "smithy.api#documentation": "Contains information about an Amazon GuardDuty Extended Threat Detection attack sequence finding. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide."
+            "smithy.api#documentation": "Contains information about an Amazon GuardDuty Extended Threat Detection attack sequence finding. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide."
        }
    },
    "com.amazonaws.securityhub#Severity": {

@@ -33677,7 +33677,7 @@
            "Severity": {
                "target": "com.amazonaws.securityhub#Double",
                "traits": {
-                    "smithy.api#documentation": "The severity associated with the signal. For more information about severity, see Findings severity levels in the Amazon GuardDuty User Guide."
+                    "smithy.api#documentation": "The severity associated with the signal. For more information about severity, see Severity levels for GuardDuty findings in the Amazon GuardDuty User Guide."
                }
            },
            "Count": {

@@ -35032,7 +35032,7 @@
            "LastObservedAt": {
                "target": "com.amazonaws.securityhub#NonEmptyString",
                "traits": {
-                    "smithy.api#documentation": "Indicates when the most recent instance of a threat intelligence indicator was observed. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)."
+                    "smithy.api#documentation": "Indicates when the most recent instance of a threat intelligence indicator was observed. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps."
                }
            },
            "Source": {

@@ -36338,7 +36338,7 @@
            "AutoEnableControls": {
                "target": "com.amazonaws.securityhub#Boolean",
                "traits": {
-                    "smithy.api#documentation": "Whether to automatically enable new controls when they are added to standards that are enabled. By default, this is set to true, and new controls are enabled automatically. To not automatically enable new controls, set this to false."
+                    "smithy.api#documentation": "Whether to automatically enable new controls when they are added to standards that are enabled. By default, this is set to true, and new controls are enabled automatically. To not automatically enable new controls, set this to false. When you automatically enable new controls, you can interact with the controls in the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of DISABLED. It can take up to several days for Security Hub to process the control release and designate the control as ENABLED in your account. During the processing period, you can manually enable or disable a control, and Security Hub will maintain that designation regardless of whether you have AutoEnableControls set to true."
                }
            },
            "ControlFindingGenerator": {

@@ -36473,7 +36473,7 @@
            }
        },
        "traits": {
-            "smithy.api#documentation": "Provides Amazon Web Services account information of the user involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide."
+            "smithy.api#documentation": "Provides Amazon Web Services account information of the user involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide."
        }
    },
    "com.amazonaws.securityhub#VerificationState": {

@@ -36777,13 +36777,13 @@
            "VendorCreatedAt": {
                "target": "com.amazonaws.securityhub#NonEmptyString",
                "traits": {
-                    "smithy.api#documentation": "Indicates when the vulnerability advisory was created. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)."
+                    "smithy.api#documentation": "Indicates when the vulnerability advisory was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps."
                }
            },
            "VendorUpdatedAt": {
                "target": "com.amazonaws.securityhub#NonEmptyString",
                "traits": {
-                    "smithy.api#documentation": "Indicates when the vulnerability advisory was last updated. This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z); YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z); YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59); YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759); YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)."
+                    "smithy.api#documentation": "Indicates when the vulnerability advisory was last updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps."
                }
            }
        },

diff --git a/models/securitylake.json b/models/securitylake.json
index b1029f3591..5265736769 100644
--- a/models/securitylake.json
+++ b/models/securitylake.json
@@ -427,7 +427,7 @@
            "eventClasses": {
                "target": "com.amazonaws.securitylake#OcsfEventClassList",
                "traits": {
-                    "smithy.api#documentation": "The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are: ACCESS_ACTIVITY, FILE_ACTIVITY, KERNEL_ACTIVITY, KERNEL_EXTENSION, MEMORY_ACTIVITY, MODULE_ACTIVITY, PROCESS_ACTIVITY, REGISTRY_KEY_ACTIVITY, REGISTRY_VALUE_ACTIVITY, RESOURCE_ACTIVITY, SCHEDULED_JOB_ACTIVITY, SECURITY_FINDING, ACCOUNT_CHANGE, AUTHENTICATION, AUTHORIZATION, ENTITY_MANAGEMENT_AUDIT, DHCP_ACTIVITY, NETWORK_ACTIVITY, DNS_ACTIVITY, FTP_ACTIVITY, HTTP_ACTIVITY, RDP_ACTIVITY, SMB_ACTIVITY, SSH_ACTIVITY, CONFIG_STATE, INVENTORY_INFO, EMAIL_ACTIVITY, API_ACTIVITY, CLOUD_API."
+                    "smithy.api#documentation": "The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. For the list of supported event classes, see the Amazon Security Lake User Guide."
                }
            },
            "configuration": {

@@ -630,7 +630,7 @@
            ],
            "traits": {
                "aws.iam#actionPermissionDescription": "Grants permission to automatically enable Amazon Security Lake for new member accounts in your organization. or Grants permission to automatically enable Amazon Security Lake for new organization accounts",
-                "smithy.api#documentation": "Automatically enables Amazon Security Lake for new member accounts in your organization. Security Lake is not automatically enabled for any existing member accounts in your organization.",
+                "smithy.api#documentation": "Automatically enables Amazon Security Lake for new member accounts in your organization. Security Lake is not automatically enabled for any existing member accounts in your organization. This operation merges the new data lake organization configuration with the existing configuration for Security Lake in your organization. If you want to create a new data lake organization configuration, you must delete the existing one using DeleteDataLakeOrganizationConfiguration.",
                "smithy.api#http": {
                    "method": "POST",
                    "uri": "/v1/datalake/organization/configuration",

@@ -1402,7 +1402,7 @@
            "eventClasses": {
                "target": "com.amazonaws.securitylake#OcsfEventClassList",
                "traits": {
-                    "smithy.api#documentation": "The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are: ACCESS_ACTIVITY, FILE_ACTIVITY, KERNEL_ACTIVITY, KERNEL_EXTENSION, MEMORY_ACTIVITY, MODULE_ACTIVITY, PROCESS_ACTIVITY, REGISTRY_KEY_ACTIVITY, REGISTRY_VALUE_ACTIVITY, RESOURCE_ACTIVITY, SCHEDULED_JOB_ACTIVITY, SECURITY_FINDING, ACCOUNT_CHANGE, AUTHENTICATION, AUTHORIZATION, ENTITY_MANAGEMENT_AUDIT, DHCP_ACTIVITY, NETWORK_ACTIVITY, DNS_ACTIVITY, FTP_ACTIVITY, HTTP_ACTIVITY, RDP_ACTIVITY, SMB_ACTIVITY, SSH_ACTIVITY, CONFIG_STATE, INVENTORY_INFO, EMAIL_ACTIVITY, API_ACTIVITY, CLOUD_API."
+                    "smithy.api#documentation": "The Open Cybersecurity Schema Framework (OCSF) event classes describes the type of data that the custom source will send to Security Lake. For the list of supported event classes, see Supported OCSF Event classes in the Amazon Security Lake User Guide."
                }
            },
            "sourceStatuses": {
diff --git a/models/sesv2.json b/models/sesv2.json
index b0dd599785..cee7ee3e02 100644
--- a/models/sesv2.json
+++ b/models/sesv2.json
@@ -10173,6 +10173,12 @@
                "traits": {
                    "smithy.api#enumValue": "BIMI"
                }
+            },
+            "COMPLAINT": {
+                "target": "smithy.api#Unit",
+                "traits": {
+                    "smithy.api#enumValue": "COMPLAINT"
+                }
+            }
        }
    },

diff --git a/models/sns.json b/models/sns.json
index 7ee3165008..282103c587 100644
--- a/models/sns.json
+++ b/models/sns.json
@@ -1794,7 +1794,7 @@
            "Attributes": {
                "target": "com.amazonaws.sns#TopicAttributesMap",
                "traits": {
-                    "smithy.api#documentation": "A map of attributes with their corresponding values. The following lists names, descriptions, and values of the special request parameters that the CreateTopic action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. FifoTopic – Set to true to create a FIFO topic. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. The following attribute applies only to server-side encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. The following attributes apply only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action."
+                    "smithy.api#documentation": "A map of attributes with their corresponding values. The following lists names, descriptions, and values of the special request parameters that the CreateTopic action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. FifoTopic – Set to true to create a FIFO topic. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. The following attribute applies only to server-side encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. The following attributes apply only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. FifoThroughputScope – Enables higher throughput for your FIFO topic by adjusting the scope of deduplication. This attribute has two possible values: Topic – The scope of message deduplication is across the entire topic. This is the default value and maintains existing behavior, with a maximum throughput of 3000 messages per second or 20MB per second, whichever comes first. MessageGroup – The scope of deduplication is within each individual message group, which enables higher throughput per topic subject to regional quotas. For more information on quotas or to request an increase, see Amazon SNS service quotas in the Amazon Web Services General Reference."
                }
            },
            "Tags": {

@@ -3947,7 +3947,7 @@
            "MessageDeduplicationId": {
                "target": "com.amazonaws.sns#String",
                "traits": {
-                    "smithy.api#documentation": "This parameter applies only to FIFO (first-in-first-out) topics. The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages with the same MessageDeduplicationId are accepted successfully but aren't delivered. Every message must have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the topic doesn't have ContentBasedDeduplication set, the action fails with an error. If the topic has a ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled, and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered. The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues). If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SNS can't detect duplicate messages. Amazon SNS continues to keep track of the message deduplication ID even after the message is received and deleted. The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~)."
+                    "smithy.api#documentation": "This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~). Every message must have a unique MessageDeduplicationId, which is a token used for deduplication of sent messages within the 5 minute minimum deduplication interval. The scope of deduplication depends on the FifoThroughputScope attribute, when set to Topic the message deduplication scope is across the entire topic, when set to MessageGroup the message deduplication scope is within each individual message group. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages within the deduplication scope and interval, with the same MessageDeduplicationId, are accepted successfully but aren't delivered. Every message must have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the topic doesn't have ContentBasedDeduplication set, the action fails with an error. If the topic has a ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication scope and interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled, and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates, within the deduplication scope and interval, and only one copy of the message is delivered. The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues). If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SNS can't detect duplicate messages. Amazon SNS continues to keep track of the message deduplication ID even after the message is received and deleted."
                }
            },
            "MessageGroupId": {

@@ -4068,7 +4068,7 @@
            "MessageDeduplicationId": {
                "target": "com.amazonaws.sns#String",
                "traits": {
-                    "smithy.api#documentation": "This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~). Every message must have a unique MessageDeduplicationId, which is a token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any message sent with the same MessageDeduplicationId during the 5-minute deduplication interval is treated as a duplicate. If the topic has ContentBasedDeduplication set, the system generates a MessageDeduplicationId based on the contents of the message. Your MessageDeduplicationId overrides the generated one."
+                    "smithy.api#documentation": "This parameter applies only to FIFO (first-in-first-out) topics. The MessageDeduplicationId can contain up to 128 alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~). Every message must have a unique MessageDeduplicationId, which is a token used for deduplication of sent messages within the 5 minute minimum deduplication interval. The scope of deduplication depends on the FifoThroughputScope attribute, when set to Topic the message deduplication scope is across the entire topic, when set to MessageGroup the message deduplication scope is within each individual message group. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages within the deduplication scope and interval, with the same MessageDeduplicationId, are accepted successfully but aren't delivered. Every message must have a unique MessageDeduplicationId: You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your topic, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the topic doesn't have ContentBasedDeduplication set, the action fails with an error. If the topic has a ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication scope and interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled, and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates, within the deduplication scope and interval, and only one copy of the message is delivered."
                }
            },
            "MessageGroupId": {

@@ -4561,7 +4561,7 @@
            "AttributeName": {
                "target": "com.amazonaws.sns#attributeName",
                "traits": {
-                    "smithy.api#documentation": "A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses: ApplicationSuccessFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a platform application endpoint. DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. HTTP: HTTPSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint. Amazon Kinesis Data Firehose: FirehoseSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. Lambda: LambdaSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Lambda endpoint. LambdaSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Lambda endpoint. LambdaFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Lambda endpoint. Platform application endpoint: ApplicationSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. ApplicationSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. ApplicationFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. In addition to being able to configure topic attributes for message delivery status of notification messages sent to Amazon SNS application endpoints, you can also configure application attributes for the delivery status of push notification messages sent to push notification services. For example, For more information, see Using Amazon SNS Application Attributes for Message Delivery Status. Amazon SQS: SQSSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. SQSSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. SQSFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. The SuccessFeedbackRoleArn and FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to use CloudWatch Logs on your behalf. The SuccessFeedbackSampleRate attribute is for specifying the sample rate percentage (0-100) of successfully delivered messages. After you configure the FailureFeedbackRoleArn attribute, then all failed message deliveries generate CloudWatch Logs. The following attribute applies only to server-side-encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. The following attribute applies only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action.",
+                    "smithy.api#documentation": "A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the SetTopicAttributes action uses: ApplicationSuccessFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to a platform application endpoint. DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. HTTP: HTTPSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an HTTP endpoint. HTTPFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an HTTP endpoint. Amazon Kinesis Data Firehose: FirehoseSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. FirehoseFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint. Lambda: LambdaSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Lambda endpoint. LambdaSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Lambda endpoint. LambdaFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Lambda endpoint. Platform application endpoint: ApplicationSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. ApplicationSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. ApplicationFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon Web Services application endpoint. In addition to being able to configure topic attributes for message delivery status of notification messages sent to Amazon SNS application endpoints, you can also configure application attributes for the delivery status of push notification messages sent to push notification services. For example, For more information, see Using Amazon SNS Application Attributes for Message Delivery Status. Amazon SQS: SQSSuccessFeedbackRoleArn – Indicates successful message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. SQSSuccessFeedbackSampleRate – Indicates percentage of successful messages to sample for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. SQSFailureFeedbackRoleArn – Indicates failed message delivery status for an Amazon SNS topic that is subscribed to an Amazon SQS endpoint. The SuccessFeedbackRoleArn and FailureFeedbackRoleArn attributes are used to give Amazon SNS write access to use CloudWatch Logs on your behalf. The SuccessFeedbackSampleRate attribute is for specifying the sample rate percentage (0-100) of successfully delivered messages. After you configure the FailureFeedbackRoleArn attribute, then all failed message deliveries generate CloudWatch Logs. The following attribute applies only to server-side-encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. The following attribute applies only to FIFO topics: ArchivePolicy – The policy that sets the retention period for messages stored in the message archive of an Amazon SNS FIFO topic. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. FifoThroughputScope – Enables higher throughput for your FIFO topic by adjusting the scope of deduplication. This attribute has two possible values: Topic – The scope of message deduplication is across the entire topic. This is the default value and maintains existing behavior, with a maximum throughput of 3000 messages per second or 20MB per second, whichever comes first. MessageGroup – The scope of deduplication is within each individual message group, which enables higher throughput per topic subject to regional quotas. For more information on quotas or to request an increase, see Amazon SNS service quotas in the Amazon Web Services General Reference.",
                    "smithy.api#required": {}
                }
            },

diff --git a/models/sqs.json b/models/sqs.json
index 9a96b4f9d0..737f3c4585 100644
--- a/models/sqs.json
+++ b/models/sqs.json
@@ -1403,7 +1403,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. For example, if the default timeout for a queue is 60 seconds, 15 seconds have elapsed since you received the message, and you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count from the time that you make the ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout or to delete that message 10 seconds after you initially change the visibility timeout (a total of 25 seconds) might result in an error. An Amazon SQS message has three basic states: (1) sent to a queue by a producer; (2) received from the queue by a consumer; (3) deleted from the queue. A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of in flight messages. Limits that apply to in flight messages are unrelated to the unlimited number of stored messages. For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request. For FIFO queues, there can be a maximum of 20,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages. If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time. Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received."
+            "smithy.api#documentation": "Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. For example, if the default timeout for a queue is 60 seconds, 15 seconds have elapsed since you received the message, and you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count from the time that you make the ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout or to delete that message 10 seconds after you initially change the visibility timeout (a total of 25 seconds) might result in an error. An Amazon SQS message has three basic states: (1) sent to a queue by a producer; (2) received from the queue by a consumer; (3) deleted from the queue. A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of in flight messages. Limits that apply to in flight messages are unrelated to the unlimited number of stored messages. For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request. For FIFO queues, there can be a maximum of 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages. If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time. Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received."
        }
    },
    "com.amazonaws.sqs#ChangeMessageVisibilityBatch": {

@@ -1618,7 +1618,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind: If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide. If you don't provide a value for an attribute, the queue is created with the default value for the attribute. If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name. To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues. After you create a queue, you must wait at least one second after the queue is created to be able to use the queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. be aware of existing queue names: If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue. If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide."
+            "smithy.api#documentation": "Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind: If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide. If you don't provide a value for an attribute, the queue is created with the default value for the attribute. If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name. To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues. After you create a queue, you must wait at least one second after the queue is created to be able to use the queue. To retrieve the URL of a queue, use the GetQueueUrl action. This action only requires the QueueName parameter. When creating queues, keep the following points in mind: If you specify the name of an existing queue and provide the exact same names and values for all its attributes, the CreateQueue action will return the URL of the existing queue instead of creating a new one. If you attempt to create a queue with a name that already exists but with different attribute names or values, the CreateQueue action will return an error. This ensures that existing queues are not inadvertently altered. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide."
        }
    },
    "com.amazonaws.sqs#CreateQueueRequest": {

Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue.

The ReceiptHandle is associated with a specific instance of receiving a message. If you receive a message more than once, the ReceiptHandle is different each time you receive a message. When you use the DeleteMessage action, you must provide the most recently received ReceiptHandle for the message (otherwise, the request succeeds, but the message will not be deleted).

For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues.

" + "smithy.api#documentation": "

Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue.

Each time you receive a message, meaning when a consumer retrieves a message from the queue, it comes with a unique ReceiptHandle. If you receive the same message more than once, you will get a different ReceiptHandle each time. When you want to delete a message using the DeleteMessage action, you must use the ReceiptHandle from the most recent time you received the message. If you use an old ReceiptHandle, the request will succeed, but the message might not be deleted.

For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues.
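A minimal Soto sketch of the receive-then-delete flow described above, assuming the same generated convenience methods; the queue URL is hypothetical:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)
let queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/my-queue"  // hypothetical

// Delete using the ReceiptHandle from this receive, not an older one;
// an outdated handle can make the request succeed without deleting.
let received = try await sqs.receiveMessage(maxNumberOfMessages: 1, queueUrl: queueUrl)
if let message = received.messages?.first, let handle = message.receiptHandle {
    // ... process message.body ...
    try await sqs.deleteMessage(queueUrl: queueUrl, receiptHandle: handle)
}
try await client.shutdown()
```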
" } }, "com.amazonaws.sqs#DeleteMessageBatch": { @@ -2032,7 +2032,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the URL of an existing Amazon SQS queue.

To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS Developer Guide.

" + "smithy.api#documentation": "

The GetQueueUrl API returns the URL of an existing Amazon SQS queue. This is useful when you know the queue's name but need to retrieve its URL for further operations.

To access a queue owned by another Amazon Web Services account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. Note that the queue owner must grant you the necessary permissions to access the queue. For more information about accessing shared queues, see the AddPermission API or Allow developers to write messages to a shared queue in the Amazon SQS Developer Guide.
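A minimal Soto sketch, assuming the generated getQueueUrl convenience method; the queue name and account ID are illustrative:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)

// Resolve a queue URL from its name; the owner account ID is only
// needed for a queue shared from another account.
let response = try await sqs.getQueueUrl(
    queueName: "my-queue",                  // illustrative name
    queueOwnerAWSAccountId: "123456789012"  // hypothetical account ID
)
print(response.queueUrl ?? "queue not found")
try await client.shutdown()
```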

" } }, "com.amazonaws.sqs#GetQueueUrlRequest": { @@ -2041,19 +2041,19 @@ "QueueName": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

The name of the queue whose URL must be fetched. Maximum 80 characters. Valid values: alphanumeric characters, hyphens (-), and underscores (_).

Queue URLs and names are case-sensitive.

", + "smithy.api#documentation": "

(Required) The name of the queue for which you want to fetch the URL. The name can be up to 80 characters long and can include alphanumeric characters, hyphens (-), and underscores (_). Queue URLs and names are case-sensitive.

", "smithy.api#required": {} } }, "QueueOwnerAWSAccountId": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Services account ID of the account that created the queue.

" + "smithy.api#documentation": "

(Optional) The Amazon Web Services account ID of the account that created the queue. This is only required when you are attempting to access a queue owned by another Amazon Web Services account.

" } } }, "traits": { - "smithy.api#documentation": "

", + "smithy.api#documentation": "

Retrieves the URL of an existing queue based on its name and, optionally, the Amazon Web Services account ID.

", "smithy.api#input": {} } }, @@ -2084,7 +2084,7 @@ "code": "InvalidAddress", "httpResponseCode": 404 }, - "smithy.api#documentation": "

The accountId is invalid.

", + "smithy.api#documentation": "

The specified ID is invalid.

", "smithy.api#error": "client", "smithy.api#httpError": 404 } @@ -2165,7 +2165,7 @@ "code": "InvalidSecurity", "httpResponseCode": 403 }, - "smithy.api#documentation": "

When the request to a queue is not HTTPS and SigV4.

", + "smithy.api#documentation": "

The request was not made over HTTPS or did not use SigV4 for signing.

", "smithy.api#error": "client", "smithy.api#httpError": 403 } @@ -3225,7 +3225,7 @@ "code": "AWS.SimpleQueueService.NonExistentQueue", "httpResponseCode": 400 }, - "smithy.api#documentation": "

The specified queue doesn't exist.

", + "smithy.api#documentation": "

Ensure that the QueueUrl is correct and that the queue has not been deleted.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -3320,7 +3320,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon SQS Developer Guide.

Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Thus, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request.

For each message returned, the response includes the following:

  • The message body.

  • An MD5 digest of the message body. For information about MD5, see RFC1321.

  • The MessageId you received when you sent the message to the queue.

  • The receipt handle.

  • The message attributes.

  • An MD5 digest of the message attributes.

The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide.

You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon SQS Developer Guide.

A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

" + "smithy.api#documentation": "

Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon SQS Developer Guide.

Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Therefore, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request.

For each message returned, the response includes the following:

  • The message body.

  • An MD5 digest of the message body. For information about MD5, see RFC1321.

  • The MessageId you received when you sent the message to the queue.

  • The receipt handle.

  • The message attributes.

  • An MD5 digest of the message attributes.

The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide.

You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. The default visibility timeout for a queue is 30 seconds.

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.
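A minimal Soto sketch of long polling as described above, assuming the generated receiveMessage convenience method; the queue URL is hypothetical:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)
let queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/my-queue"  // hypothetical

// WaitTimeSeconds > 0 switches from the default short poll to long
// polling, which reduces empty responses from sampled hosts.
let response = try await sqs.receiveMessage(
    maxNumberOfMessages: 10,
    queueUrl: queueUrl,
    waitTimeSeconds: 20
)
for message in response.messages ?? [] {
    print(message.messageId ?? "", message.body ?? "")
}
try await client.shutdown()
```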
" } }, "com.amazonaws.sqs#ReceiveMessageRequest": { @@ -3339,7 +3339,7 @@ "smithy.api#deprecated": { "message": "AttributeNames has been replaced by MessageSystemAttributeNames" }, - "smithy.api#documentation": "\n

This parameter has been deprecated but will be supported for backward compatibility. To provide attribute names, you are encouraged to use MessageSystemAttributeNames.

A list of attributes that need to be returned along with each message. These attributes include:

  • All – Returns all values.

  • ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds).

  • ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted.

  • AWSTraceHeader – Returns the X-Ray trace header string.

  • SenderId

      • For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R.

      • For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456.

  • SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds).

  • SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS).

  • MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action.

  • MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence.

  • SequenceNumber – Returns the value provided by Amazon SQS.

", + "smithy.api#documentation": "

This parameter has been discontinued but will be supported for backward compatibility. To provide attribute names, you are encouraged to use MessageSystemAttributeNames.

A list of attributes that need to be returned along with each message. These attributes include:

  • All – Returns all values.

  • ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds).

  • ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted.

  • AWSTraceHeader – Returns the X-Ray trace header string.

  • SenderId

      • For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R.

      • For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456.

  • SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds).

  • SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS).

  • MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action.

  • MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence.

  • SequenceNumber – Returns the value provided by Amazon SQS.
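A minimal Soto sketch of requesting system attributes through the newer MessageSystemAttributeNames parameter; the enum-case spellings are assumptions and should be verified against the generated SotoSQS module:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)
let queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/my-queue"  // hypothetical

// Prefer messageSystemAttributeNames over the deprecated attributeNames.
// Case names below are assumed from the attribute list above.
let response = try await sqs.receiveMessage(
    messageSystemAttributeNames: [.approximateReceiveCount, .sentTimestamp],
    queueUrl: queueUrl
)
for message in response.messages ?? [] {
    print(message.attributes ?? [:])
}
try await client.shutdown()
```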
", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "AttributeName" } @@ -3369,13 +3369,13 @@ "VisibilityTimeout": { "target": "com.amazonaws.sqs#NullableInteger", "traits": { - "smithy.api#documentation": "

The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request.

" + "smithy.api#documentation": "

The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request. If not specified, the default visibility timeout for the queue is used, which is 30 seconds.

Understanding VisibilityTimeout:

  • When a message is received from a queue, it becomes temporarily invisible to other consumers for the duration of the visibility timeout. This prevents multiple consumers from processing the same message simultaneously. If the message is not deleted or its visibility timeout is not extended before the timeout expires, it becomes visible again and can be retrieved by other consumers.

  • Setting an appropriate visibility timeout is crucial. If it's too short, the message might become visible again before processing is complete, leading to duplicate processing. If it's too long, it delays the reprocessing of messages if the initial processing fails.

  • You can adjust the visibility timeout using the --visibility-timeout parameter in the receive-message command to match the processing time required by your application.

  • A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead-letter queue.

For more information, see Visibility Timeout in the Amazon SQS Developer Guide.
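A minimal Soto sketch of the extend-the-timeout pattern from the list above, assuming the generated changeMessageVisibility convenience method; the queue URL and durations are illustrative:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)
let queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/my-worker-queue"  // hypothetical

// Receive with a short visibility timeout, then extend it if
// processing turns out to need more time.
let response = try await sqs.receiveMessage(
    maxNumberOfMessages: 1,
    queueUrl: queueUrl,
    visibilityTimeout: 30
)
if let message = response.messages?.first, let handle = message.receiptHandle {
    // Processing is taking longer than expected: extend the timeout
    // so the message doesn't become visible to other consumers.
    try await sqs.changeMessageVisibility(
        queueUrl: queueUrl,
        receiptHandle: handle,
        visibilityTimeout: 120
    )
}
try await client.shutdown()
```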

" } }, "WaitTimeSeconds": { "target": "com.amazonaws.sqs#NullableInteger", "traits": { - "smithy.api#documentation": "

The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds. If no messages are available and the wait time expires, the call does not return a message list.

To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage requests is longer than the WaitTimeSeconds parameter. For example, with the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous clients.

" + "smithy.api#documentation": "

The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds. If no messages are available and the wait time expires, the call does not return a message list. If you are using the Java SDK, it returns a ReceiveMessageResponse object, which has an empty list instead of a null object.

To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage requests is longer than the WaitTimeSeconds parameter. For example, with the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous clients.
" } }, "ReceiveRequestAttemptId": { @@ -3386,7 +3386,7 @@ } }, "traits": { - "smithy.api#documentation": "

", + "smithy.api#documentation": "

Retrieves one or more messages from a specified queue.

", "smithy.api#input": {} } }, @@ -3471,7 +3471,7 @@ "code": "RequestThrottled", "httpResponseCode": 403 }, - "smithy.api#documentation": "

The request was denied due to request throttling.

  • The rate of requests per second exceeds the Amazon Web Services KMS request quota for an account and Region.

  • A burst or sustained high rate of requests to change the state of the same KMS key. This condition is often known as a "hot key."

  • Requests for operations on KMS keys in an Amazon Web Services CloudHSM key store might be throttled at a lower-than-expected rate when the Amazon Web Services CloudHSM cluster associated with the Amazon Web Services CloudHSM key store is processing numerous commands, including those unrelated to the Amazon Web Services CloudHSM key store.

", + "smithy.api#documentation": "

The request was denied due to request throttling.

  • The request exceeds the permitted request rate for the queue or for the recipient of the request.

  • Ensure that the request rate is within the Amazon SQS limits for sending messages. For more information, see Amazon SQS quotas in the Amazon SQS Developer Guide.
", "smithy.api#error": "client", "smithy.api#httpError": 403 } @@ -4114,7 +4114,7 @@ "code": "AWS.SimpleQueueService.TooManyEntriesInBatchRequest", "httpResponseCode": 400 }, - "smithy.api#documentation": "

The batch request contains more entries than permissible.

", + "smithy.api#documentation": "

The batch request contains more entries than permissible. For Amazon SQS, the maximum number of entries you can include in a single SendMessageBatch, DeleteMessageBatch, or ChangeMessageVisibilityBatch request is 10.
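A minimal Soto sketch that respects the 10-entry ceiling by chunking, assuming the generated sendMessageBatch convenience method; the queue URL and message bodies are illustrative:

```swift
import SotoSQS

let client = AWSClient()
let sqs = SQS(client: client, region: .useast1)
let queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/my-queue"  // hypothetical

let bodies = (1...25).map { "message-\($0)" }

// Send in chunks of at most 10 entries, the batch ceiling for
// SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch.
for start in stride(from: 0, to: bodies.count, by: 10) {
    let chunk = bodies[start..<min(start + 10, bodies.count)]
    let entries = chunk.enumerated().map { offset, body in
        SQS.SendMessageBatchRequestEntry(id: "msg-\(start + offset)", messageBody: body)
    }
    _ = try await sqs.sendMessageBatch(entries: entries, queueUrl: queueUrl)
}
try await client.shutdown()
```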

", "smithy.api#error": "client", "smithy.api#httpError": 400 } diff --git a/models/ssm-sap.json b/models/ssm-sap.json index fb223f7620..e437809fe8 100644 --- a/models/ssm-sap.json +++ b/models/ssm-sap.json @@ -572,6 +572,47 @@ "target": "com.amazonaws.ssmsap#ComponentId" } }, + "com.amazonaws.ssmsap#ComponentInfo": { + "type": "structure", + "members": { + "ComponentType": { + "target": "com.amazonaws.ssmsap#ComponentType", + "traits": { + "smithy.api#documentation": "

This string is the type of the component.

Accepted value is WD.

", + "smithy.api#required": {} + } + }, + "Sid": { + "target": "com.amazonaws.ssmsap#SID", + "traits": { + "smithy.api#documentation": "

This string is the SAP System ID of the component.

Accepted values are alphanumeric.

", + "smithy.api#required": {} + } + }, + "Ec2InstanceId": { + "target": "com.amazonaws.ssmsap#InstanceId", + "traits": { + "smithy.api#documentation": "

This is the Amazon EC2 instance on which your SAP component is running.

Accepted values are alphanumeric.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This is information about the component of your SAP application, such as Web Dispatcher.

" + } + }, + "com.amazonaws.ssmsap#ComponentInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmsap#ComponentInfo" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, "com.amazonaws.ssmsap#ComponentStatus": { "type": "enum", "members": { @@ -2518,6 +2559,12 @@ "traits": { "smithy.api#documentation": "

The Amazon Resource Name of the SAP HANA database.

" } + }, + "ComponentsInfo": { + "target": "com.amazonaws.ssmsap#ComponentInfoList", + "traits": { + "smithy.api#documentation": "

This is an optional parameter for component details to which the SAP ABAP application is attached, such as Web Dispatcher.

This is an array of ApplicationComponent objects. You may input 0 to 5 items.
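A hypothetical Swift sketch of populating the new shape; the SotoSsmSap type and enum-case names here are assumptions inferred from this model diff, not confirmed API:

```swift
import SotoSsmSap

// Hypothetical sketch of the new ComponentInfo shape; member and case
// names are inferred from the model diff above and should be verified.
let webDispatcher = SsmSap.ComponentInfo(
    componentType: .wd,                   // accepted value: WD
    ec2InstanceId: "i-0123456789abcdef0", // hypothetical EC2 instance ID
    sid: "WD1"                            // alphanumeric SAP System ID
)

// ComponentsInfo accepts between 0 and 5 of these entries when
// registering an SAP ABAP application.
let componentsInfo: [SsmSap.ComponentInfo] = [webDispatcher]
_ = componentsInfo
```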

" + } } } }, diff --git a/models/ssm.json b/models/ssm.json index fb11af8ac7..3d8dad7d0d 100644 --- a/models/ssm.json +++ b/models/ssm.json @@ -858,7 +858,7 @@ "name": "ssm" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

Amazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure end-to-end management solution for hybrid cloud environments that enables safe and secure operations at scale.

This reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up Amazon Web Services Systems Manager.

Related resources

", + "smithy.api#documentation": "

Amazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure end-to-end management solution for hybrid cloud environments that enables safe and secure operations at scale.

This reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up Amazon Web Services Systems Manager.

Related resources

\n ", "smithy.api#title": "Amazon Simple Systems Manager (SSM)", "smithy.api#xmlNamespace": { "uri": "http://ssm.amazonaws.com/doc/2014-11-06/" @@ -1936,7 +1936,7 @@ } ], "traits": { - "smithy.api#documentation": "

Associates a related item to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager and OpsCenter are capabilities of Amazon Web Services Systems Manager.

" + "smithy.api#documentation": "

Associates a related item to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager and OpsCenter are tools in Amazon Web Services Systems Manager.

" } }, "com.amazonaws.ssm#AssociateOpsItemRelatedItemRequest": { @@ -2188,7 +2188,7 @@ "AutomationTargetParameterName": { "target": "com.amazonaws.ssm#AutomationTargetParameterName", "traits": { - "smithy.api#documentation": "

Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager.

" + "smithy.api#documentation": "

Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager.

" } }, "Parameters": { @@ -2260,7 +2260,7 @@ "SyncCompliance": { "target": "com.amazonaws.ssm#AssociationSyncCompliance", "traits": { - "smithy.api#documentation": "

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.

By default, all associations use AUTO mode.

" + "smithy.api#documentation": "

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.

By default, all associations use AUTO mode.

" } }, "ApplyOnlyAtCronInterval": { @@ -3030,7 +3030,7 @@ "SyncCompliance": { "target": "com.amazonaws.ssm#AssociationSyncCompliance", "traits": { - "smithy.api#documentation": "

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.

By default, all associations use AUTO mode.

" + "smithy.api#documentation": "

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.

By default, all associations use AUTO mode.

" } }, "ApplyOnlyAtCronInterval": { @@ -4700,7 +4700,7 @@ "ServiceRole": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Identity and Access Management (IAM) service role that Run Command, a capability of Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes.

" + "smithy.api#documentation": "

The Identity and Access Management (IAM) service role that Run Command, a tool in Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes.

" } }, "NotificationConfig": { @@ -4909,7 +4909,7 @@ "ServiceRole": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Identity and Access Management (IAM) service role that Run Command, a capability of Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes on a per managed node basis.

" + "smithy.api#documentation": "

The Identity and Access Management (IAM) service role that Run Command, a tool in Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes on a per managed node basis.

" } }, "NotificationConfig": { @@ -5757,7 +5757,7 @@ } ], "traits": { - "smithy.api#documentation": "

Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide.

Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes.

" + "smithy.api#documentation": "

Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager tools. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide.

Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes.
" } }, "com.amazonaws.ssm#CreateActivationRequest": { @@ -5881,7 +5881,7 @@ } ], "traits": { - "smithy.api#documentation": "

A State Manager association defines the state that you want to maintain on your managed nodes. For example, an association can specify that anti-virus software must be installed and running on your managed nodes, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a capability of Amazon Web Services Systems Manager, applies the configuration when new managed nodes are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software isn't installed, then State Manager installs it. If the software is installed, but the service isn't running, then the association might instruct State Manager to start the service.

" + "smithy.api#documentation": "

A State Manager association defines the state that you want to maintain on your managed nodes. For example, an association can specify that anti-virus software must be installed and running on your managed nodes, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a tool in Amazon Web Services Systems Manager, applies the configuration when new managed nodes are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software isn't installed, then State Manager installs it. If the software is installed, but the service isn't running, then the association might instruct State Manager to start the service.

" } }, "com.amazonaws.ssm#CreateAssociationBatch": { @@ -5988,7 +5988,7 @@ "AutomationTargetParameterName": { "target": "com.amazonaws.ssm#AutomationTargetParameterName", "traits": { - "smithy.api#documentation": "

Specify the target for the association. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager.

" + "smithy.api#documentation": "

Specify the target for the association. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager.

" } }, "DocumentVersion": { @@ -6042,7 +6042,7 @@ "SyncCompliance": { "target": "com.amazonaws.ssm#AssociationSyncCompliance", "traits": { - "smithy.api#documentation": "

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.

By default, all associations use AUTO mode.

" + "smithy.api#documentation": "

The mode for generating association compliance. You can specify AUTO or MANUAL. In AUTO mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is COMPLIANT. If the association execution doesn't run successfully, the association is NON-COMPLIANT.

In MANUAL mode, you must specify the AssociationId as a parameter for the PutComplianceItems API operation. In this case, compliance data isn't managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.

By default, all associations use AUTO mode.

" } }, "ApplyOnlyAtCronInterval": { @@ -6165,7 +6165,7 @@ "AutomationTargetParameterName": { "target": "com.amazonaws.ssm#AutomationTargetParameterName", "traits": { - "smithy.api#documentation": "

Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a capability of Amazon Web Services Systems Manager.

" + "smithy.api#documentation": "

Choose the parameter that will define how your automation will branch out. This target is required for associations that use an Automation runbook and target resources by using rate controls. Automation is a tool in Amazon Web Services Systems Manager.

" } }, "MaxErrors": { @@ -10327,7 +10327,7 @@ "target": "com.amazonaws.ssm#Integer", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The number of managed nodes with NotApplicable patches beyond the supported limit, which aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager.

" + "smithy.api#documentation": "

The number of managed nodes with NotApplicable patches beyond the supported limit, which aren't reported by name to Inventory. Inventory is a tool in Amazon Web Services Systems Manager.

" } }, "InstancesWithCriticalNonCompliantPatches": { @@ -10619,7 +10619,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the association between an OpsItem and a related item. For example, this API operation can delete an Incident Manager incident from an OpsItem. Incident Manager is a capability of Amazon Web Services Systems Manager.

" + "smithy.api#documentation": "

Deletes the association between an OpsItem and a related item. For example, this API operation can delete an Incident Manager incident from an OpsItem. Incident Manager is a tool in Amazon Web Services Systems Manager.

" } }, "com.amazonaws.ssm#DisassociateOpsItemRelatedItemRequest": { @@ -12220,7 +12220,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets the state of an Amazon Web Services Systems Manager change calendar at the current time or a specified time. If you specify a time, GetCalendarState returns the state of the calendar at that specific time, and returns the next time that the change calendar state will transition. If you don't specify a time, GetCalendarState uses the current time. Change Calendar entries have two possible states: OPEN or CLOSED.

If you specify more than one calendar in a request, the command returns the status of OPEN only if all calendars in the request are open. If one or more calendars in the request are closed, the status returned is CLOSED.

For more information about Change Calendar, a capability of Amazon Web Services Systems Manager, see Amazon Web Services Systems Manager Change Calendar in the Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

Gets the state of an Amazon Web Services Systems Manager change calendar at the current time or a specified time. If you specify a time, GetCalendarState returns the state of the calendar at that specific time, and returns the next time that the change calendar state will transition. If you don't specify a time, GetCalendarState uses the current time. Change Calendar entries have two possible states: OPEN or CLOSED.

If you specify more than one calendar in a request, the command returns the status of OPEN only if all calendars in the request are open. If one or more calendars in the request are closed, the status returned is CLOSED.

For more information about Change Calendar, a tool in Amazon Web Services Systems Manager, see Amazon Web Services Systems Manager Change Calendar in the Amazon Web Services Systems Manager User Guide.
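A minimal Soto sketch, assuming the generated getCalendarState convenience method; the calendar document ARN is illustrative:

```swift
import SotoSSM

let client = AWSClient()
let ssm = SSM(client: client, region: .useast1)

// Returns OPEN only if every calendar in the list is open.
let state = try await ssm.getCalendarState(
    calendarNames: ["arn:aws:ssm:us-east-1:123456789012:document/MyChangeCalendar"]
)
print(state.state?.rawValue ?? "unknown", state.nextTransitionTime ?? "")
try await client.shutdown()
```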

" } }, "com.amazonaws.ssm#GetCalendarStateRequest": { @@ -12654,7 +12654,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the current snapshot for the patch baseline the managed node uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document (SSM document).

If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a capability of Amazon Web Services Systems Manager, with an SSM document that enables you to target a managed node with a script or command. For example, run the command using the AWS-RunShellScript document or the AWS-RunPowerShellScript document.

" + "smithy.api#documentation": "

Retrieves the current snapshot for the patch baseline the managed node uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document (SSM document).

If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a tool in Amazon Web Services Systems Manager, with an SSM document that enables you to target a managed node with a script or command. For example, run the command using the AWS-RunShellScript document or the AWS-RunPowerShellScript document.
" } }, "com.amazonaws.ssm#GetDeployablePatchSnapshotForInstanceRequest": { @@ -14199,7 +14199,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieve information about one or more parameters in a specific hierarchy.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

", + "smithy.api#documentation": "

Retrieve information about one or more parameters under a specified level in a hierarchy.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.
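A minimal Soto sketch of following NextToken via the generated paginator, assuming Soto's async paginator naming; the parameter path is illustrative:

```swift
import SotoSSM

let client = AWSClient()
let ssm = SSM(client: client, region: .useast1)

// The paginator follows NextToken automatically across pages.
let request = SSM.GetParametersByPathRequest(path: "/my-app/config", recursive: true)
for try await page in ssm.getParametersByPathPaginator(request) {
    for parameter in page.parameters ?? [] {
        print(parameter.name ?? "", parameter.value ?? "")
    }
}
try await client.shutdown()
```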

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -15499,7 +15499,7 @@ "target": "com.amazonaws.ssm#PatchUnreportedNotApplicableCount", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The number of patches beyond the supported limit of NotApplicableCount that aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager.

" + "smithy.api#documentation": "

The number of patches beyond the supported limit of NotApplicableCount that aren't reported by name to Inventory. Inventory is a tool in Amazon Web Services Systems Manager.

" } }, "NotApplicableCount": { @@ -17900,7 +17900,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You can limit the results to a specific State Manager association document or managed node by specifying a filter. State Manager is a capability of Amazon Web Services Systems Manager.

" + "smithy.api#documentation": "

Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You can limit the results to a specific State Manager association document or managed node by specifying a filter. State Manager is a tool in Amazon Web Services Systems Manager.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -19021,7 +19021,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists all related-item resources associated with a Systems Manager OpsCenter OpsItem. OpsCenter is a capability of Amazon Web Services Systems Manager.

" + "smithy.api#documentation": "

Lists all related-item resources associated with a Systems Manager OpsCenter OpsItem. OpsCenter is a tool in Amazon Web Services Systems Manager.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -23617,7 +23617,7 @@ "PolicyType": { "target": "com.amazonaws.ssm#String", "traits": { - "smithy.api#documentation": "

The type of policy. Parameter Store, a capability of Amazon Web Services Systems Manager, supports the following policy types: Expiration, ExpirationNotification, and NoChangeNotification.

" + "smithy.api#documentation": "

The type of policy. Parameter Store, a tool in Amazon Web Services Systems Manager, supports the following policy types: Expiration, ExpirationNotification, and NoChangeNotification.

" } }, "PolicyStatus": { @@ -25690,7 +25690,7 @@ "Value": { "target": "com.amazonaws.ssm#PSParameterValue", "traits": { - "smithy.api#documentation": "

The parameter value that you want to add to the system. Standard parameters have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB.

Parameters can't be referenced or nested in the values of other parameters. You can't include {{}} or {{ssm:parameter-name}} in a parameter value.

", + "smithy.api#documentation": "

The parameter value that you want to add to the system. Standard parameters have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB.

Parameters can't be referenced or nested in the values of other parameters. You can't include values wrapped in double brackets {{}} or {{ssm:parameter-name}} in a parameter value.
", "smithy.api#required": {} } }, @@ -25734,7 +25734,7 @@ "Policies": { "target": "com.amazonaws.ssm#ParameterPolicies", "traits": { - "smithy.api#documentation": "

One or more policies to apply to a parameter. This operation takes a JSON array. Parameter Store, a capability of Amazon Web Services Systems Manager, supports the following policy types:

Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter doesn't affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter.

ExpirationNotification: This policy initiates an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours.

NoChangeNotification: This policy initiates a CloudWatch Events event if a parameter hasn't been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it hasn't been changed.

All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Assigning parameter policies.

" + "smithy.api#documentation": "

One or more policies to apply to a parameter. This operation takes a JSON array. Parameter Store, a tool in Amazon Web Services Systems Manager, supports the following policy types:

Expiration: This policy deletes the parameter after it expires. When you create the policy, you specify the expiration date. You can update the expiration date and time by updating the policy. Updating the parameter doesn't affect the expiration date and time. When the expiration time is reached, Parameter Store deletes the parameter.

ExpirationNotification: This policy initiates an event in Amazon CloudWatch Events that notifies you about the expiration. By using this policy, you can receive notification before or after the expiration time is reached, in units of days or hours.

NoChangeNotification: This policy initiates a CloudWatch Events event if a parameter hasn't been modified for a specified period of time. This policy type is useful when, for example, a secret needs to be changed within a period of time, but it hasn't been changed.

All existing policies are preserved until you send new policies or an empty policy. For more information about parameter policies, see Assigning parameter policies.
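A minimal Soto sketch of attaching an Expiration policy, assuming the generated putParameter convenience method; the parameter name, value, and timestamp are illustrative:

```swift
import SotoSSM

let client = AWSClient()
let ssm = SSM(client: client, region: .useast1)

// Policies are passed as a JSON array string.
let expirationPolicy = """
[{"Type":"Expiration","Version":"1.0","Attributes":{"Timestamp":"2025-12-31T23:59:59.000Z"}}]
"""
_ = try await ssm.putParameter(
    name: "/my-app/temporary-secret",  // illustrative name
    policies: expirationPolicy,
    type: .secureString,
    value: "s3cr3t"
)
try await client.shutdown()
```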

" } }, "DataType": { @@ -27892,7 +27892,7 @@ "CloudWatchOutputConfig": { "target": "com.amazonaws.ssm#CloudWatchOutputConfig", "traits": { - "smithy.api#documentation": "

Enables Amazon Web Services Systems Manager to send Run Command output to Amazon CloudWatch Logs. Run Command is a capability of Amazon Web Services Systems Manager.

" + "smithy.api#documentation": "

Enables Amazon Web Services Systems Manager to send Run Command output to Amazon CloudWatch Logs. Run Command is a tool in Amazon Web Services Systems Manager.

" } }, "AlarmConfiguration": { @@ -28659,7 +28659,7 @@ "Tags": { "target": "com.amazonaws.ssm#TagList", "traits": { - "smithy.api#documentation": "

Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key-value pairs:

  • Key=environment,Value=test

  • Key=OS,Value=Windows

To add tags to an existing automation, use the AddTagsToResource operation.

" + "smithy.api#documentation": "

Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key-value pairs:

  • Key=environment,Value=test

  • Key=OS,Value=Windows

The Array Members maximum value is reported as 1000. This number includes capacity reserved for internal operations. When calling the StartAutomationExecution action, you can specify a maximum of 5 tags. You can, however, use the AddTagsToResource action to add up to a total of 50 tags to an existing automation configuration.
" } }, "AlarmConfiguration": { @@ -28785,7 +28785,7 @@ "Tags": { "target": "com.amazonaws.ssm#TagList", "traits": { - "smithy.api#documentation": "

Optional metadata that you assign to a resource. You can specify a maximum of five tags for a change request. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a change request to identify an environment or target Amazon Web Services Region. In this case, you could specify the following key-value pairs:

  • Key=Environment,Value=Production

  • Key=Region,Value=us-east-2

" + "smithy.api#documentation": "

Optional metadata that you assign to a resource. You can specify a maximum of five tags for a change request. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a change request to identify an environment or target Amazon Web Services Region. In this case, you could specify the following key-value pairs:

  • Key=Environment,Value=Production

  • Key=Region,Value=us-east-2

The Array Members maximum value is reported as 1000. This number includes capacity reserved for internal operations. When calling the StartChangeRequestExecution action, you can specify a maximum of 5 tags. You can, however, use the AddTagsToResource action to add up to a total of 50 tags to an existing change request configuration.
" } }, "ScheduledEndTime": { @@ -28940,7 +28940,7 @@ "Parameters": { "target": "com.amazonaws.ssm#SessionManagerParameters", "traits": { - "smithy.api#documentation": "

The values you want to specify for the parameters defined in the Session document.

" + "smithy.api#documentation": "

The values you want to specify for the parameters defined in the Session document. For more information about these parameters, see Create a Session Manager preferences document in the Amazon Web Services Systems Manager User Guide.

" } } }, @@ -29474,7 +29474,7 @@ } }, "traits": { - "smithy.api#documentation": "

An array of search criteria that targets managed nodes using a key-value pair that you specify.

One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide.

Supported formats include the following.

For all Systems Manager capabilities:

  • Key=tag-key,Values=tag-value-1,tag-value-2

For Automation and Change Manager:

  • Key=tag:tag-key,Values=tag-value

  • Key=ResourceGroup,Values=resource-group-name

  • Key=ParameterValues,Values=value-1,value-2,value-3

  • To target all instances in the Amazon Web Services Region:

      • Key=AWS::EC2::Instance,Values=*

      • Key=InstanceIds,Values=*

For Run Command and Maintenance Windows:

  • Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

  • Key=tag:tag-key,Values=tag-value-1,tag-value-2

  • Key=resource-groups:Name,Values=resource-group-name

  • Additionally, Maintenance Windows support targeting resource types:

      • Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For State Manager:

  • Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

  • Key=tag:tag-key,Values=tag-value-1,tag-value-2

  • To target all instances in the Amazon Web Services Region:

      • Key=InstanceIds,Values=*

For more information about how to send commands that target managed nodes using Key,Value parameters, see Targeting multiple managed nodes in the Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

An array of search criteria that targets managed nodes using a key-value pair that you specify.

One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide.

Supported formats include the following.

For all Systems Manager tools:

  • Key=tag-key,Values=tag-value-1,tag-value-2

For Automation and Change Manager:

  • Key=tag:tag-key,Values=tag-value

  • Key=ResourceGroup,Values=resource-group-name

  • Key=ParameterValues,Values=value-1,value-2,value-3

  • To target all instances in the Amazon Web Services Region:

      • Key=AWS::EC2::Instance,Values=*

      • Key=InstanceIds,Values=*

For Run Command and Maintenance Windows:

  • Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

  • Key=tag:tag-key,Values=tag-value-1,tag-value-2

  • Key=resource-groups:Name,Values=resource-group-name

  • Additionally, Maintenance Windows support targeting resource types:

      • Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For State Manager:

  • Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

  • Key=tag:tag-key,Values=tag-value-1,tag-value-2

  • To target all instances in the Amazon Web Services Region:

      • Key=InstanceIds,Values=*

For more information about how to send commands that target managed nodes using Key,Value parameters, see Targeting multiple managed nodes in the Amazon Web Services Systems Manager User Guide.
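A minimal Soto sketch of building Targets in one of the formats above for Run Command, assuming the generated sendCommand convenience method; the document name, tag values, and command are illustrative:

```swift
import SotoSSM

let client = AWSClient()
let ssm = SSM(client: client, region: .useast1)

// Targets built from the Key=...,Values=... formats above;
// e.g. use key "InstanceIds" with values ["*"] for all instances.
let targets: [SSM.Target] = [
    .init(key: "tag:Environment", values: ["Production"])
]
_ = try await ssm.sendCommand(
    documentName: "AWS-RunShellScript",
    parameters: ["commands": ["uptime"]],
    targets: targets
)
try await client.shutdown()
```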

" } }, "com.amazonaws.ssm#TargetCount": { @@ -30141,7 +30141,7 @@ "Parameters": { "target": "com.amazonaws.ssm#Parameters", "traits": { - "smithy.api#documentation": "
@@ -30141,7 +30141,7 @@ "Parameters": { "target": "com.amazonaws.ssm#Parameters", "traits": { - "smithy.api#documentation": "
The parameters you want to update for the association. If you create a parameter using\n Parameter Store, a capability of Amazon Web Services Systems Manager, you can reference the parameter using\n {{ssm:parameter-name}}.

" + "smithy.api#documentation": "

The parameters you want to update for the association. If you create a parameter using\n Parameter Store, a tool in Amazon Web Services Systems Manager, you can reference the parameter using\n {{ssm:parameter-name}}.

" } }, "DocumentVersion": { @@ -30189,7 +30189,7 @@ "AutomationTargetParameterName": { "target": "com.amazonaws.ssm#AutomationTargetParameterName", "traits": { - "smithy.api#documentation": "

Choose the parameter that will define how your automation will branch out. This target is\n required for associations that use an Automation runbook and target resources by using rate\n controls. Automation is a capability of Amazon Web Services Systems Manager.

" + "smithy.api#documentation": "

Choose the parameter that will define how your automation will branch out. This target is\n required for associations that use an Automation runbook and target resources by using rate\n controls. Automation is a tool in Amazon Web Services Systems Manager.

" } }, "MaxErrors": { @@ -30213,7 +30213,7 @@ "SyncCompliance": { "target": "com.amazonaws.ssm#AssociationSyncCompliance", "traits": { - "smithy.api#documentation": "

The mode for generating association compliance. You can specify AUTO or\n MANUAL. In AUTO mode, the system uses the status of the association\n execution to determine the compliance status. If the association execution runs successfully,\n then the association is COMPLIANT. If the association execution doesn't run\n successfully, the association is NON-COMPLIANT.

\n

In MANUAL mode, you must specify the AssociationId as a parameter\n for the PutComplianceItems API operation. In this case, compliance data isn't\n managed by State Manager, a capability of Amazon Web Services Systems Manager. It is managed by your direct call to the\n PutComplianceItems API operation.

\n

By default, all associations use AUTO mode.

" + "smithy.api#documentation": "

The mode for generating association compliance. You can specify AUTO or\n MANUAL. In AUTO mode, the system uses the status of the association\n execution to determine the compliance status. If the association execution runs successfully,\n then the association is COMPLIANT. If the association execution doesn't run\n successfully, the association is NON-COMPLIANT.

\n

In MANUAL mode, you must specify the AssociationId as a parameter\n for the PutComplianceItems API operation. In this case, compliance data isn't\n managed by State Manager, a tool in Amazon Web Services Systems Manager. It is managed by your direct call to the PutComplianceItems API operation.

\n

By default, all associations use AUTO mode.

" } }, "ApplyOnlyAtCronInterval": { diff --git a/models/sso-oidc.json b/models/sso-oidc.json index ad40043c8d..7cd97dd122 100644 --- a/models/sso-oidc.json +++ b/models/sso-oidc.json @@ -58,7 +58,7 @@ "name": "sso-oauth" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "
diff --git a/models/sso-oidc.json b/models/sso-oidc.json
index ad40043c8d..7cd97dd122 100644
--- a/models/sso-oidc.json
+++ b/models/sso-oidc.json
@@ -58,7 +58,7 @@ "name": "sso-oauth" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "
IAM Identity Center OpenID Connect (OIDC) is a web service that enables a client (such as CLI or a native application) to register with IAM Identity Center. The service also enables the client to fetch the user’s access token upon successful authentication and authorization with IAM Identity Center.

IAM Identity Center uses the sso and identitystore API namespaces.

Considerations for Using This Guide

Before you begin using this guide, we recommend that you first review the following important information about how the IAM Identity Center OIDC service works.

    • The IAM Identity Center OIDC service currently implements only the portions of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628) that are necessary to enable single sign-on authentication with the CLI.
    • With older versions of the CLI, the service only emits OIDC access tokens, so to obtain a new token, users must explicitly re-authenticate. To access the OIDC flow that supports token refresh and doesn’t require re-authentication, update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with support for OIDC token refresh and configurable IAM Identity Center session durations. For more information, see Configure Amazon Web Services access portal session duration.
    • The access tokens provided by this service grant access to all Amazon Web Services account entitlements assigned to an IAM Identity Center user, not just a particular application.
    • The documentation in this guide does not describe the mechanism to convert the access token into Amazon Web Services Auth (“sigv4”) credentials for use with IAM-protected Amazon Web Services service endpoints. For more information, see GetRoleCredentials in the IAM Identity Center Portal API Reference Guide.

For general information about IAM Identity Center, see What is IAM Identity Center? in the IAM Identity Center User Guide.

", + "smithy.api#documentation": "

IAM Identity Center OpenID Connect (OIDC) is a web service that enables a client (such as CLI or a native application) to register with IAM Identity Center. The service also enables the client to fetch the user’s access token upon successful authentication and authorization with IAM Identity Center.

API namespaces

IAM Identity Center uses the sso and identitystore API namespaces. IAM Identity Center OpenID Connect uses the sso-oidc namespace.

Considerations for using this guide

Before you begin using this guide, we recommend that you first review the following important information about how the IAM Identity Center OIDC service works.

    • The IAM Identity Center OIDC service currently implements only the portions of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628) that are necessary to enable single sign-on authentication with the CLI.
    • With older versions of the CLI, the service only emits OIDC access tokens, so to obtain a new token, users must explicitly re-authenticate. To access the OIDC flow that supports token refresh and doesn’t require re-authentication, update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with support for OIDC token refresh and configurable IAM Identity Center session durations. For more information, see Configure Amazon Web Services access portal session duration.
    • The access tokens provided by this service grant access to all Amazon Web Services account entitlements assigned to an IAM Identity Center user, not just a particular application.
    • The documentation in this guide does not describe the mechanism to convert the access token into Amazon Web Services Auth (“sigv4”) credentials for use with IAM-protected Amazon Web Services service endpoints. For more information, see GetRoleCredentials in the IAM Identity Center Portal API Reference Guide.

For general information about IAM Identity Center, see What is IAM Identity Center? in the IAM Identity Center User Guide.

", "smithy.api#title": "AWS SSO OIDC", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -968,13 +968,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be access_denied.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be access_denied.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, @@ -1008,13 +1008,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be authorization_pending.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be\n authorization_pending.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, @@ -1090,7 +1090,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Creates and returns access and refresh tokens for clients that are authenticated using\n client secrets. The access token can be used to fetch short-term credentials for the assigned\n AWS accounts or to access application APIs using bearer authentication.

", + "smithy.api#documentation": "

Creates and returns access and refresh tokens for clients that are authenticated using\n client secrets. The access token can be used to fetch short-lived credentials for the assigned\n AWS accounts or to access application APIs using bearer authentication.

", "smithy.api#examples": [ { "title": "Call OAuth/OIDC /token endpoint for Device Code grant with Secret authentication", @@ -1156,44 +1156,44 @@ "grantType": { "target": "com.amazonaws.ssooidc#GrantType", "traits": { - "smithy.api#documentation": "
@@ -1156,44 +1156,44 @@ "grantType": { "target": "com.amazonaws.ssooidc#GrantType", "traits": { - "smithy.api#documentation": "
Supports the following OAuth grant types: Device Code and Refresh Token. Specify either of the following values, depending on the grant type that you want:

    * Device Code - urn:ietf:params:oauth:grant-type:device_code
    * Refresh Token - refresh_token

For information about how to obtain the device code, see the StartDeviceAuthorization topic.

", + "smithy.api#documentation": "

Supports the following OAuth grant types: Authorization Code, Device Code, and Refresh Token. Specify one of the following values, depending on the grant type that you want:

    * Authorization Code - authorization_code
    * Device Code - urn:ietf:params:oauth:grant-type:device_code
    * Refresh Token - refresh_token
", "smithy.api#required": {} } }, "deviceCode": { "target": "com.amazonaws.ssooidc#DeviceCode", "traits": { - "smithy.api#documentation": "

Used only when calling this API for the Device Code grant type. This short-term code is\n used to identify this authorization request. This comes from the result of the\n StartDeviceAuthorization API.

" + "smithy.api#documentation": "

Used only when calling this API for the Device Code grant type. This short-lived code is\n used to identify this authorization request. This comes from the result of the StartDeviceAuthorization API.

" } }, "code": { "target": "com.amazonaws.ssooidc#AuthCode", "traits": { - "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. The short-term code is\n used to identify this authorization request. This grant type is currently unsupported for the\n CreateToken API.

" + "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. The short-lived\n code is used to identify this authorization request.

" } }, "refreshToken": { "target": "com.amazonaws.ssooidc#RefreshToken", "traits": { - "smithy.api#documentation": "

Used only when calling this API for the Refresh Token grant type. This token is used to\n refresh short-term tokens, such as the access token, that might expire.

\n

For more information about the features and limitations of the current IAM Identity Center OIDC\n implementation, see Considerations for Using this Guide in the IAM Identity Center\n OIDC API Reference.

" + "smithy.api#documentation": "

Used only when calling this API for the Refresh Token grant type. This token is used to\n refresh short-lived tokens, such as the access token, that might expire.

\n

For more information about the features and limitations of the current IAM Identity Center OIDC\n implementation, see Considerations for Using this Guide in the IAM Identity Center\n OIDC API Reference.

" } }, "scope": { "target": "com.amazonaws.ssooidc#Scopes", "traits": { - "smithy.api#documentation": "

The list of scopes for which authorization is requested. The access token that is issued\n is limited to the scopes that are granted. If this value is not specified, IAM Identity Center authorizes\n all scopes that are configured for the client during the call to\n RegisterClient.

" + "smithy.api#documentation": "

The list of scopes for which authorization is requested. The access token that is issued\n is limited to the scopes that are granted. If this value is not specified, IAM Identity Center authorizes\n all scopes that are configured for the client during the call to RegisterClient.

" } }, "redirectUri": { "target": "com.amazonaws.ssooidc#URI", "traits": { - "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This value specifies\n the location of the client or application that has registered to receive the authorization\n code.

" + "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This value\n specifies the location of the client or application that has registered to receive the\n authorization code.

" } }, "codeVerifier": { "target": "com.amazonaws.ssooidc#CodeVerifier", "traits": { - "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This value is generated\n by the client and presented to validate the original code challenge value the client passed at\n authorization time.

" + "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This value is\n generated by the client and presented to validate the original code challenge value the client\n passed at authorization time.

" } } }, @@ -1226,13 +1226,13 @@ "refreshToken": { "target": "com.amazonaws.ssooidc#RefreshToken", "traits": { - "smithy.api#documentation": "

A token that, if present, can be used to refresh a previously issued access token that\n might have expired.

\n

For more\n information about the features and limitations of the current IAM Identity Center OIDC implementation,\n see Considerations for Using this Guide in the IAM Identity Center\n OIDC API Reference.

" + "smithy.api#documentation": "

A token that, if present, can be used to refresh a previously issued access token that\n might have expired.

\n

For more information about the features and limitations of the current IAM Identity Center OIDC\n implementation, see Considerations for Using this Guide in the IAM Identity Center\n OIDC API Reference.

" } }, "idToken": { "target": "com.amazonaws.ssooidc#IdToken", "traits": { - "smithy.api#documentation": "

The idToken is not implemented or supported. For more information about the\n features and limitations of the current IAM Identity Center OIDC implementation, see Considerations\n for Using this Guide in the IAM Identity Center\n OIDC API Reference.

\n

A JSON Web Token (JWT) that identifies who is associated with the issued access token.\n

" + "smithy.api#documentation": "

The idToken is not implemented or supported. For more information about the\n features and limitations of the current IAM Identity Center OIDC implementation, see\n Considerations for Using this Guide in the IAM Identity Center\n OIDC API Reference.

\n

A JSON Web Token (JWT) that identifies who is associated with the issued access token.\n

" } } }, @@ -1287,7 +1287,7 @@ } ], "traits": { - "smithy.api#documentation": "
@@ -1287,7 +1287,7 @@ } ], "traits": { - "smithy.api#documentation": "
Creates and returns access and refresh tokens for clients and applications that are\n authenticated using IAM entities. The access token can be used to fetch short-term credentials\n for the assigned Amazon Web Services accounts or to access application APIs using bearer\n authentication.

", + "smithy.api#documentation": "

Creates and returns access and refresh tokens for clients and applications that are\n authenticated using IAM entities. The access token can be used to fetch short-lived\n credentials for the assigned Amazon Web Services accounts or to access application APIs using\n bearer authentication.

", "smithy.api#examples": [ { "title": "Call OAuth/OIDC /token endpoint for Authorization Code grant with IAM authentication", @@ -1318,18 +1318,19 @@ } }, { - "title": "Call OAuth/OIDC /token endpoint for Refresh Token grant with IAM authentication", + "title": "Call OAuth/OIDC /token endpoint for JWT Bearer grant with IAM authentication", "documentation": "", "input": { "clientId": "arn:aws:sso::123456789012:application/ssoins-111111111111/apl-222222222222", - "grantType": "refresh_token", - "refreshToken": "aorvJYubGpU6i91YnH7Mfo-AT2fIVa1zCfA_Rvq9yjVKIP3onFmmykuQ7E93y2I-9Nyj-A_sVvMufaLNL0bqnDRtgAkc0:MGUCMFrRsktMRVlWaOR70XGMFGLL0SlcCw4DiYveIiOVx1uK9BbD0gvAddsW3UTLozXKMgIxAJ3qxUvjpnlLIOaaKOoa/FuNgqJVvr9GMwDtnAtlh9iZzAkEXAMPLEREFRESHTOKEN" + "grantType": "urn:ietf:params:oauth:grant-type:jwt-bearer", + "assertion": "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjFMVE16YWtpaGlSbGFfOHoyQkVKVlhlV01xbyJ9.eyJ2ZXIiOiIyLjAiLCJpc3MiOiJodHRwczovL2xvZ2luLm1pY3Jvc29mdG9ubGluZS5jb20vOTEyMjA0MGQtNmM2Ny00YzViLWIxMTItMzZhMzA0YjY2ZGFkL3YyLjAiLCJzdWIiOiJBQUFBQUFBQUFBQUFBQUFBQUFBQUFJa3pxRlZyU2FTYUZIeTc4MmJidGFRIiwiYXVkIjoiNmNiMDQwMTgtYTNmNS00NmE3LWI5OTUtOTQwYzc4ZjVhZWYzIiwiZXhwIjoxNTM2MzYxNDExLCJpYXQiOjE1MzYyNzQ3MTEsIm5iZiI6MTUzNjI3NDcxMSwibmFtZSI6IkFiZSBMaW5jb2xuIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiQWJlTGlAbWljcm9zb2Z0LmNvbSIsIm9pZCI6IjAwMDAwMDAwLTAwMDAtMDAwMC02NmYzLTMzMzJlY2E3ZWE4MSIsInRpZCI6IjkxMjIwNDBkLTZjNjctNGM1Yi1iMTEyLTM2YTMwNGI2NmRhZCIsIm5vbmNlIjoiMTIzNTIzIiwiYWlvIjoiRGYyVVZYTDFpeCFsTUNXTVNPSkJjRmF0emNHZnZGR2hqS3Y4cTVnMHg3MzJkUjVNQjVCaXN2R1FPN1lXQnlqZDhpUURMcSFlR2JJRGFreXA1bW5PcmNkcUhlWVNubHRlcFFtUnA2QUlaOGpZIn0.1AFWW-Ck5nROwSlltm7GzZvDwUkqvhSQpm55TQsmVo9Y59cLhRXpvB8n-55HCr9Z6G_31_UbeUkoz612I2j_Sm9FFShSDDjoaLQr54CreGIJvjtmS3EkK9a7SJBbcpL1MpUtlfygow39tFjY7EVNW9plWUvRrTgVk7lYLprvfzw-CIqw3gHC-T7IK_m_xkr08INERBtaecwhTeN4chPC4W3jdmw_lIxzC48YoQ0dB1L9-ImX98Egypfrlbm0IBL5spFzL6JDZIRRJOu8vecJvj1mq-IUhGt0MacxX8jdxYLP-KUu2d9MbNKpCKJuZ7p8gwTL5B7NlUdh_dmSviPWrw" }, "output": { "accessToken": "aoal-YigITUDiNX1xZwOMXM5MxOWDL0E0jg9P6_C_jKQPxS_SKCP6f0kh1Up4g7TtvQqkMnD-GJiU_S1gvug6SrggAkc0:MGYCMQD3IatVjV7jAJU91kK3PkS/SfA2wtgWzOgZWDOR7sDGN9t0phCZz5It/aes/3C1Zj0CMQCKWOgRaiz6AIhza3DSXQNMLjRKXC8F8ceCsHlgYLMZ7hZidEXAMPLEACCESSTOKEN", "tokenType": "Bearer", "expiresIn": 1579729529, "refreshToken": "aorvJYubGpU6i91YnH7Mfo-AT2fIVa1zCfA_Rvq9yjVKIP3onFmmykuQ7E93y2I-9Nyj-A_sVvMufaLNL0bqnDRtgAkc0:MGUCMFrRsktMRVlWaOR70XGMFGLL0SlcCw4DiYveIiOVx1uK9BbD0gvAddsW3UTLozXKMgIxAJ3qxUvjpnlLIOaaKOoa/FuNgqJVvr9GMwDtnAtlh9iZzAkEXAMPLEREFRESHTOKEN", + "idToken": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhd3M6aWRlbnRpdHlfc3RvcmVfaWQiOiJkLTMzMzMzMzMzMzMiLCJzdWIiOiI3MzA0NDhmMi1lMGExLTcwYTctYzk1NC0wMDAwMDAwMDAwMDAiLCJhd3M6aW5zdGFuY2VfYWNjb3VudCI6IjExMTExMTExMTExMSIsInN0czppZGVudGl0eV9jb250ZXh0IjoiRVhBTVBMRUlERU5USVRZQ09OVEVYVCIsInN0czphdWRpdF9jb250ZXh0IjoiRVhBTVBMRUFVRElUQ09OVEVYVCIsImlzcyI6Imh0dHBzOi8vaWRlbnRpdHljZW50ZXIuYW1hem9uYXdzLmNvbS9zc29pbnMtMTExMTExMTExMTExIiwiYXdzOmlkZW50aXR5X3N0b3JlX2FybiI6ImFybjphd3M6aWRlbnRpdHlzdG9yZTo6MTExMTExMTExMTExOmlkZW50aXR5c3RvcmUvZC0zMzMzMzMzMzMzIiwiYXVkIjoiYXJuOmF3czpzc286OjEyMzQ1Njc4OTAxMjphcHBsaWNhdGlvbi9zc29pbnMtMTExMTExMTExMTExL2FwbC0yMjIyMjIyMjIyMjIiLCJhd3M6aW5zdGFuY2VfYXJuIjoiYXJuOmF3czpzc286OjppbnN0YW5jZS9zc29pbnMtMTExMTExMTExMTExIiwiYXdzOmNyZWRlbnRpYWxfaWQiOiJfWlIyTjZhVkJqMjdGUEtheWpfcEtwVjc3QVBERl80MXB4ZXRfWWpJdUpONlVJR2RBdkpFWEFNUExFQ1JFRElEIiwiYXV0aF90aW1lIjoiMjAyMC0wMS0yMlQxMjo0NToyOVoiLCJleHAiOjE1Nzk3Mjk1MjksImlhdCI6MTU3OTcyNTkyOX0.Xyah6qbk78qThzJ41iFU2yfGuRqqtKXHrJYwQ8L9Ip0", "issuedTokenType": 
"urn:ietf:params:oauth:token-type:refresh_token", "scope": [ "openid", @@ -1339,19 +1340,18 @@ } }, { - "title": "Call OAuth/OIDC /token endpoint for JWT Bearer grant with IAM authentication", + "title": "Call OAuth/OIDC /token endpoint for Refresh Token grant with IAM authentication", "documentation": "", "input": { "clientId": "arn:aws:sso::123456789012:application/ssoins-111111111111/apl-222222222222", - "grantType": "urn:ietf:params:oauth:grant-type:jwt-bearer", - "assertion": "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjFMVE16YWtpaGlSbGFfOHoyQkVKVlhlV01xbyJ9.eyJ2ZXIiOiIyLjAiLCJpc3MiOiJodHRwczovL2xvZ2luLm1pY3Jvc29mdG9ubGluZS5jb20vOTEyMjA0MGQtNmM2Ny00YzViLWIxMTItMzZhMzA0YjY2ZGFkL3YyLjAiLCJzdWIiOiJBQUFBQUFBQUFBQUFBQUFBQUFBQUFJa3pxRlZyU2FTYUZIeTc4MmJidGFRIiwiYXVkIjoiNmNiMDQwMTgtYTNmNS00NmE3LWI5OTUtOTQwYzc4ZjVhZWYzIiwiZXhwIjoxNTM2MzYxNDExLCJpYXQiOjE1MzYyNzQ3MTEsIm5iZiI6MTUzNjI3NDcxMSwibmFtZSI6IkFiZSBMaW5jb2xuIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiQWJlTGlAbWljcm9zb2Z0LmNvbSIsIm9pZCI6IjAwMDAwMDAwLTAwMDAtMDAwMC02NmYzLTMzMzJlY2E3ZWE4MSIsInRpZCI6IjkxMjIwNDBkLTZjNjctNGM1Yi1iMTEyLTM2YTMwNGI2NmRhZCIsIm5vbmNlIjoiMTIzNTIzIiwiYWlvIjoiRGYyVVZYTDFpeCFsTUNXTVNPSkJjRmF0emNHZnZGR2hqS3Y4cTVnMHg3MzJkUjVNQjVCaXN2R1FPN1lXQnlqZDhpUURMcSFlR2JJRGFreXA1bW5PcmNkcUhlWVNubHRlcFFtUnA2QUlaOGpZIn0.1AFWW-Ck5nROwSlltm7GzZvDwUkqvhSQpm55TQsmVo9Y59cLhRXpvB8n-55HCr9Z6G_31_UbeUkoz612I2j_Sm9FFShSDDjoaLQr54CreGIJvjtmS3EkK9a7SJBbcpL1MpUtlfygow39tFjY7EVNW9plWUvRrTgVk7lYLprvfzw-CIqw3gHC-T7IK_m_xkr08INERBtaecwhTeN4chPC4W3jdmw_lIxzC48YoQ0dB1L9-ImX98Egypfrlbm0IBL5spFzL6JDZIRRJOu8vecJvj1mq-IUhGt0MacxX8jdxYLP-KUu2d9MbNKpCKJuZ7p8gwTL5B7NlUdh_dmSviPWrw" + "grantType": "refresh_token", + "refreshToken": "aorvJYubGpU6i91YnH7Mfo-AT2fIVa1zCfA_Rvq9yjVKIP3onFmmykuQ7E93y2I-9Nyj-A_sVvMufaLNL0bqnDRtgAkc0:MGUCMFrRsktMRVlWaOR70XGMFGLL0SlcCw4DiYveIiOVx1uK9BbD0gvAddsW3UTLozXKMgIxAJ3qxUvjpnlLIOaaKOoa/FuNgqJVvr9GMwDtnAtlh9iZzAkEXAMPLEREFRESHTOKEN" }, "output": { "accessToken": "aoal-YigITUDiNX1xZwOMXM5MxOWDL0E0jg9P6_C_jKQPxS_SKCP6f0kh1Up4g7TtvQqkMnD-GJiU_S1gvug6SrggAkc0:MGYCMQD3IatVjV7jAJU91kK3PkS/SfA2wtgWzOgZWDOR7sDGN9t0phCZz5It/aes/3C1Zj0CMQCKWOgRaiz6AIhza3DSXQNMLjRKXC8F8ceCsHlgYLMZ7hZidEXAMPLEACCESSTOKEN", "tokenType": "Bearer", "expiresIn": 1579729529, "refreshToken": "aorvJYubGpU6i91YnH7Mfo-AT2fIVa1zCfA_Rvq9yjVKIP3onFmmykuQ7E93y2I-9Nyj-A_sVvMufaLNL0bqnDRtgAkc0:MGUCMFrRsktMRVlWaOR70XGMFGLL0SlcCw4DiYveIiOVx1uK9BbD0gvAddsW3UTLozXKMgIxAJ3qxUvjpnlLIOaaKOoa/FuNgqJVvr9GMwDtnAtlh9iZzAkEXAMPLEREFRESHTOKEN", - "idToken": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhd3M6aWRlbnRpdHlfc3RvcmVfaWQiOiJkLTMzMzMzMzMzMzMiLCJzdWIiOiI3MzA0NDhmMi1lMGExLTcwYTctYzk1NC0wMDAwMDAwMDAwMDAiLCJhd3M6aW5zdGFuY2VfYWNjb3VudCI6IjExMTExMTExMTExMSIsInN0czppZGVudGl0eV9jb250ZXh0IjoiRVhBTVBMRUlERU5USVRZQ09OVEVYVCIsInN0czphdWRpdF9jb250ZXh0IjoiRVhBTVBMRUFVRElUQ09OVEVYVCIsImlzcyI6Imh0dHBzOi8vaWRlbnRpdHljZW50ZXIuYW1hem9uYXdzLmNvbS9zc29pbnMtMTExMTExMTExMTExIiwiYXdzOmlkZW50aXR5X3N0b3JlX2FybiI6ImFybjphd3M6aWRlbnRpdHlzdG9yZTo6MTExMTExMTExMTExOmlkZW50aXR5c3RvcmUvZC0zMzMzMzMzMzMzIiwiYXVkIjoiYXJuOmF3czpzc286OjEyMzQ1Njc4OTAxMjphcHBsaWNhdGlvbi9zc29pbnMtMTExMTExMTExMTExL2FwbC0yMjIyMjIyMjIyMjIiLCJhd3M6aW5zdGFuY2VfYXJuIjoiYXJuOmF3czpzc286OjppbnN0YW5jZS9zc29pbnMtMTExMTExMTExMTExIiwiYXdzOmNyZWRlbnRpYWxfaWQiOiJfWlIyTjZhVkJqMjdGUEtheWpfcEtwVjc3QVBERl80MXB4ZXRfWWpJdUpONlVJR2RBdkpFWEFNUExFQ1JFRElEIiwiYXV0aF90aW1lIjoiMjAyMC0wMS0yMlQxMjo0NToyOVoiLCJleHAiOjE1Nzk3Mjk1MjksImlhdCI6MTU3OTcyNTkyOX0.Xyah6qbk78qThzJ41iFU2yfGuRqqtKXHrJYwQ8L9Ip0", "issuedTokenType": "urn:ietf:params:oauth:token-type:refresh_token", "scope": [ 
"openid", @@ -1411,37 +1411,37 @@ "code": { "target": "com.amazonaws.ssooidc#AuthCode", "traits": { - "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This short-term\n code is used to identify this authorization request. The code is obtained through a redirect\n from IAM Identity Center to a redirect URI persisted in the Authorization Code GrantOptions for the\n application.

" + "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This short-lived\n code is used to identify this authorization request. The code is obtained through a redirect\n from IAM Identity Center to a redirect URI persisted in the Authorization Code GrantOptions for the\n application.

" } }, "refreshToken": { "target": "com.amazonaws.ssooidc#RefreshToken", "traits": { - "smithy.api#documentation": "

Used only when calling this API for the Refresh Token grant type. This token is used to\n refresh short-term tokens, such as the access token, that might expire.

\n

For more information about the features and limitations of the current IAM Identity Center OIDC\n implementation, see Considerations for Using this Guide in the IAM Identity Center\n OIDC API Reference.

" + "smithy.api#documentation": "

Used only when calling this API for the Refresh Token grant type. This token is used to\n refresh short-lived tokens, such as the access token, that might expire.

\n

For more information about the features and limitations of the current IAM Identity Center OIDC\n implementation, see Considerations for Using this Guide in the IAM Identity Center\n OIDC API Reference.

" } }, "assertion": { "target": "com.amazonaws.ssooidc#Assertion", "traits": { - "smithy.api#documentation": "

Used only when calling this API for the JWT Bearer grant type. This value specifies the JSON\n Web Token (JWT) issued by a trusted token issuer. To authorize a trusted token issuer,\n configure the JWT Bearer GrantOptions for the application.

" + "smithy.api#documentation": "

Used only when calling this API for the JWT Bearer grant type. This value specifies the\n JSON Web Token (JWT) issued by a trusted token issuer. To authorize a trusted token issuer,\n configure the JWT Bearer GrantOptions for the application.

" } }, "scope": { "target": "com.amazonaws.ssooidc#Scopes", "traits": { - "smithy.api#documentation": "

The list of scopes for which authorization is requested. The access token that is issued\n is limited to the scopes that are granted. If the value is not specified, IAM Identity Center authorizes all\n scopes configured for the application, including the following default scopes:\n openid, aws, sts:identity_context.

" + "smithy.api#documentation": "

The list of scopes for which authorization is requested. The access token that is issued\n is limited to the scopes that are granted. If the value is not specified, IAM Identity Center authorizes all\n scopes configured for the application, including the following default scopes:\n openid, aws, sts:identity_context.

" } }, "redirectUri": { "target": "com.amazonaws.ssooidc#URI", "traits": { - "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This value specifies\n the location of the client or application that has registered to receive the authorization code.\n

" + "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This value\n specifies the location of the client or application that has registered to receive the\n authorization code.

" } }, "subjectToken": { "target": "com.amazonaws.ssooidc#SubjectToken", "traits": { - "smithy.api#documentation": "

Used only when calling this API for the Token Exchange grant type. This value specifies\n the subject of the exchange. The value of the subject token must be an access token issued by\n IAM Identity Center to a different client or application. The access token must have authorized scopes\n that indicate the requested application as a target audience.

" + "smithy.api#documentation": "

Used only when calling this API for the Token Exchange grant type. This value specifies\n the subject of the exchange. The value of the subject token must be an access token issued by\n IAM Identity Center to a different client or application. The access token must have authorized scopes that\n indicate the requested application as a target audience.

" } }, "subjectTokenType": { @@ -1459,7 +1459,7 @@ "codeVerifier": { "target": "com.amazonaws.ssooidc#CodeVerifier", "traits": { - "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This value is generated\n by the client and presented to validate the original code challenge value the client passed at\n authorization time.

" + "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This value is\n generated by the client and presented to validate the original code challenge value the client\n passed at authorization time.

" } } }, @@ -1492,25 +1492,25 @@ "refreshToken": { "target": "com.amazonaws.ssooidc#RefreshToken", "traits": { - "smithy.api#documentation": "

A token that, if present, can be used to refresh a previously issued access token that\n might have expired.

\n

For more\n information about the features and limitations of the current IAM Identity Center OIDC implementation,\n see Considerations for Using this Guide in the IAM Identity Center\n OIDC API Reference.

" + "smithy.api#documentation": "

A token that, if present, can be used to refresh a previously issued access token that\n might have expired.

\n

For more information about the features and limitations of the current IAM Identity Center OIDC\n implementation, see Considerations for Using this Guide in the IAM Identity Center\n OIDC API Reference.

" } }, "idToken": { "target": "com.amazonaws.ssooidc#IdToken", "traits": { - "smithy.api#documentation": "

A JSON Web Token (JWT) that identifies the user associated with the issued access token.\n

" + "smithy.api#documentation": "

A JSON Web Token (JWT) that identifies the user associated with the issued access token.\n

" } }, "issuedTokenType": { "target": "com.amazonaws.ssooidc#TokenTypeURI", "traits": { - "smithy.api#documentation": "

Indicates the type of tokens that are issued by IAM Identity Center. The following values are supported:

    * Access Token - urn:ietf:params:oauth:token-type:access_token
    * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
" + "smithy.api#documentation": "

Indicates the type of tokens that are issued by IAM Identity Center. The following values are supported:

    * Access Token - urn:ietf:params:oauth:token-type:access_token
    * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
" } }, "scope": { "target": "com.amazonaws.ssooidc#Scopes", "traits": { - "smithy.api#documentation": "

The list of scopes for which authorization is granted. The access token that is issued\n is limited to the scopes that are granted.

" + "smithy.api#documentation": "

The list of scopes for which authorization is granted. The access token that is issued is\n limited to the scopes that are granted.

" } } }, @@ -1539,13 +1539,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be expired_token.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be expired_token.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, @@ -1576,13 +1576,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be server_error.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be server_error.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, @@ -1604,13 +1604,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be invalid_client.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be\n invalid_client.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, @@ -1626,13 +1626,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be invalid_client_metadata.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be\n invalid_client_metadata.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, @@ -1648,13 +1648,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be invalid_grant.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be invalid_grant.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, @@ -1670,18 +1670,18 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be invalid_redirect_uri.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be\n invalid_redirect_uri.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, "traits": { - "smithy.api#documentation": "

Indicates that one or more redirect URI in the request is not supported for this operation.

", + "smithy.api#documentation": "

Indicates that one or more redirect URI in the request is not supported for this\n operation.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -1692,13 +1692,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be invalid_request.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be\n invalid_request.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, @@ -1714,13 +1714,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be invalid_request.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be\n invalid_request.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } }, "endpoint": { @@ -1748,13 +1748,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be invalid_scope.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be invalid_scope.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, @@ -1818,7 +1818,7 @@ ], "traits": { "smithy.api#auth": [], - "smithy.api#documentation": "

Registers a client with IAM Identity Center. This allows clients to initiate device authorization.\n The output should be persisted for reuse through many authentication requests.

", + "smithy.api#documentation": "

Registers a public client with IAM Identity Center. This allows clients to perform authorization using\n the authorization code grant with Proof Key for Code Exchange (PKCE) or the device\n code grant.

", "smithy.api#examples": [ { "title": "Call OAuth/OIDC /register-client endpoint", @@ -1888,19 +1888,19 @@ "grantTypes": { "target": "com.amazonaws.ssooidc#GrantTypes", "traits": { - "smithy.api#documentation": "

The list of OAuth 2.0 grant types that are defined by the client. This list is used to\n restrict the token granting flows available to the client.

" + "smithy.api#documentation": "

The list of OAuth 2.0 grant types that are defined by the client. This list is used to restrict the token granting flows available to the client. Supports the following OAuth 2.0 grant types: Authorization Code, Device Code, and Refresh Token.

    * Authorization Code - authorization_code
    * Device Code - urn:ietf:params:oauth:grant-type:device_code
    * Refresh Token - refresh_token
" } }, "issuerUrl": { "target": "com.amazonaws.ssooidc#URI", "traits": { - "smithy.api#documentation": "

The IAM Identity Center Issuer URL associated with an instance of IAM Identity Center. This value is needed for user access to resources through the client.

" + "smithy.api#documentation": "

The IAM Identity Center Issuer URL associated with an instance of IAM Identity Center. This value is needed for user\n access to resources through the client.

" } }, "entitledApplicationArn": { "target": "com.amazonaws.ssooidc#ArnType", "traits": { - "smithy.api#documentation": "

This IAM Identity Center application ARN is used to define administrator-managed configuration for public client access to resources. At\n authorization, the scopes, grants, and redirect URI available to this client will be restricted by this application resource.

" + "smithy.api#documentation": "

This IAM Identity Center application ARN is used to define administrator-managed configuration for\n public client access to resources. At authorization, the scopes, grants, and redirect URI\n available to this client will be restricted by this application resource.

" } } }, @@ -1969,13 +1969,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "
@@ -1969,13 +1969,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "
Single error code.\n For this exception the value will be slow_down.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be slow_down.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, @@ -2060,7 +2060,7 @@ "startUrl": { "target": "com.amazonaws.ssooidc#URI", "traits": { - "smithy.api#documentation": "

The URL for the Amazon Web Services access portal. For more information, see Using\n the Amazon Web Services access portal in the IAM Identity Center User Guide.

", + "smithy.api#documentation": "

The URL for the Amazon Web Services access portal. For more information, see Using\n the Amazon Web Services access portal in the IAM Identity Center User Guide.

", "smithy.api#required": {} } } @@ -2136,13 +2136,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be unauthorized_client.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be\n unauthorized_client.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, @@ -2158,13 +2158,13 @@ "error": { "target": "com.amazonaws.ssooidc#Error", "traits": { - "smithy.api#documentation": "

Single error code.\n For this exception the value will be unsupported_grant_type.

" + "smithy.api#documentation": "

Single error code. For this exception the value will be\n unsupported_grant_type.

" } }, "error_description": { "target": "com.amazonaws.ssooidc#ErrorDescription", "traits": { - "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the client developer\n in understanding the error that occurred.

" } } }, diff --git a/models/sts.json b/models/sts.json index 71f4721012..d5e6ea3e91 100644 --- a/models/sts.json +++ b/models/sts.json @@ -2386,7 +2386,7 @@ "SourceIdentity": { "target": "com.amazonaws.sts#sourceIdentityType", "traits": { - "smithy.api#documentation": "

The source identity specified by the principal that is calling the\n AssumeRole operation. The source identity value persists across chained role sessions.

\n

You can require users to specify a source identity when they assume a role. You do this\n by using the \n sts:SourceIdentity\n condition key in a role trust policy. You\n can use source identity information in CloudTrail logs to determine who took actions with a\n role. You can use the aws:SourceIdentity condition key to further control\n access to Amazon Web Services resources based on the value of source identity. For more information about\n using source identity, see Monitor and control\n actions taken with assumed roles in the\n IAM User Guide.

\n

The regex used to validate this parameter is a string of characters consisting of upper-\n and lower-case alphanumeric characters with no spaces. You can also include underscores or\n any of the following characters: =,.@-. You cannot use a value that begins with the text\n aws:. This prefix is reserved for Amazon Web Services internal use.

" + "smithy.api#documentation": "

The source identity specified by the principal that is calling the\n AssumeRole operation. The source identity value persists across chained role sessions.

\n

You can require users to specify a source identity when they assume a role. You do this\n by using the \n sts:SourceIdentity\n condition key in a role trust policy. You\n can use source identity information in CloudTrail logs to determine who took actions with a\n role. You can use the aws:SourceIdentity condition key to further control\n access to Amazon Web Services resources based on the value of source identity. For more information about\n using source identity, see Monitor and control\n actions taken with assumed roles in the\n IAM User Guide.

\n

The regex used to validate this parameter is a string of characters consisting of upper-\n and lower-case alphanumeric characters with no spaces. You can also include underscores or\n any of the following characters: +=,.@-. You cannot use a value that begins with the text\n aws:. This prefix is reserved for Amazon Web Services internal use.

" } }, "ProvidedContexts": { @@ -2690,7 +2690,7 @@ "WebIdentityToken": { "target": "com.amazonaws.sts#clientTokenType", "traits": { - "smithy.api#documentation": "
@@ -2690,7 +2690,7 @@ "WebIdentityToken": { "target": "com.amazonaws.sts#clientTokenType", "traits": { - "smithy.api#documentation": "
The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity\n provider. Your application must get this token by authenticating the user who is using your\n application with a web identity provider before the application makes an\n AssumeRoleWithWebIdentity call. Timestamps in the token must be formatted\n as either an integer or a long integer. Only tokens with RSA algorithms (RS256) are\n supported.

", + "smithy.api#documentation": "

The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity\n provider. Your application must get this token by authenticating the user who is using your\n application with a web identity provider before the application makes an\n AssumeRoleWithWebIdentity call. Timestamps in the token must be formatted\n as either an integer or a long integer. Tokens must be signed using either RSA keys (RS256,\n RS384, or RS512) or ECDSA keys (ES256, ES384, or ES512).

", "smithy.api#required": {} } }, @@ -2791,7 +2791,7 @@ } ], "traits": { - "smithy.api#documentation": "
@@ -2791,7 +2791,7 @@ } ], "traits": { - "smithy.api#documentation": "
Returns a set of short term credentials you can use to perform privileged tasks in a\n member account.

\n

Before you can launch a privileged session, you must have enabled centralized root\n access in your organization. For steps to enable this feature, see Centralize root access for member accounts in the IAM User\n Guide.

\n \n

The global endpoint is not supported for AssumeRoot. You must send this request to a\n Regional STS endpoint. For more information, see Endpoints.

\n
\n

You can track AssumeRoot in CloudTrail logs to determine what actions were performed in a\n session. For more information, see Track privileged tasks\n in CloudTrail in the IAM User Guide.

", + "smithy.api#documentation": "

Returns a set of short term credentials you can use to perform privileged tasks on a\n member account in your organization.

\n

Before you can launch a privileged session, you must have centralized root access in\n your organization. For steps to enable this feature, see Centralize root access for\n member accounts in the IAM User Guide.

\n \n

The STS global endpoint is not supported for AssumeRoot. You must send this request\n to a Regional STS endpoint. For more information, see Endpoints.

\n
\n

You can track AssumeRoot in CloudTrail logs to determine what actions were performed in a\n session. For more information, see Track privileged tasks\n in CloudTrail in the IAM User Guide.

", "smithy.api#examples": [ { "title": "To launch a privileged session", @@ -2829,7 +2829,7 @@ "TaskPolicyArn": { "target": "com.amazonaws.sts#PolicyDescriptorType", "traits": { - "smithy.api#documentation": "
@@ -2829,7 +2829,7 @@ "TaskPolicyArn": { "target": "com.amazonaws.sts#PolicyDescriptorType", "traits": { - "smithy.api#documentation": "
The identity based policy that scopes the session to the privileged tasks that can be\n performed. You can use one of following Amazon Web Services managed policies to scope\n root session actions. You can add additional customer managed policies to further limit the\n permissions for the root session.

\n ", + "smithy.api#documentation": "

The identity based policy that scopes the session to the privileged tasks that can be\n performed. You can use one of following Amazon Web Services managed policies to scope root session\n actions.

\n ", "smithy.api#required": {} } }, diff --git a/models/supplychain.json b/models/supplychain.json index 5651911a7d..4f3098a11b 100644 --- a/models/supplychain.json +++ b/models/supplychain.json @@ -792,7 +792,7 @@ "schema": { "target": "com.amazonaws.supplychain#DataLakeDatasetSchema", "traits": { - "smithy.api#documentation": "

The custom schema of the data lake dataset and is only required when the name space is default.

" + "smithy.api#documentation": "

The custom schema of the data lake dataset and is only required when the name space is default.

" } }, "description": { @@ -931,6 +931,12 @@ "smithy.api#documentation": "

The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon Web Services owned KMS key. If you don't provide anything here, AWS Supply Chain uses the Amazon Web Services owned KMS key.

" } }, + "webAppDnsDomain": { + "target": "com.amazonaws.supplychain#InstanceWebAppDnsDomain", + "traits": { + "smithy.api#documentation": "

The DNS subdomain of the web app. This would be \"example\" in the URL \"example.scn.global.on.aws\". You can set this to a custom value, as long as the domain isn't already being used by someone else. The name may only include alphanumeric characters and hyphens.

" + } + }, "tags": { "target": "com.amazonaws.supplychain#TagMap", "traits": { @@ -1356,6 +1362,9 @@ "traits": { "aws.api#arn": { "template": "instance/{instanceId}/data-integration-flows/{name}" + }, + "aws.cloudformation#cfnResource": { + "name": "DataIntegrationFlow" } } }, @@ -1793,6 +1802,9 @@ "aws.api#arn": { "template": "instance/{instanceId}/namespaces/{namespace}/datasets/{name}" }, + "aws.cloudformation#cfnResource": { + "name": "DataLakeDataset" + }, "smithy.api#noReplace": {} } }, @@ -2088,7 +2100,7 @@ "namespace": { "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the dataset. The available values are:

\n ", + "smithy.api#documentation": "

The name space of the dataset. The available values are:

\n ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2096,7 +2108,7 @@ "name": { "target": "com.amazonaws.supplychain#DataLakeDatasetName", "traits": { - "smithy.api#documentation": "

The name of the dataset. If the namespace is asc, the name must be one of the supported data entities\n .

", + "smithy.api#documentation": "

The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2120,7 +2132,7 @@ "namespace": { "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace", "traits": { - "smithy.api#documentation": "

The namespace of deleted dataset.

", + "smithy.api#documentation": "

The name space of deleted dataset.

", "smithy.api#required": {} } }, @@ -2163,7 +2175,7 @@ } ], "traits": { - "smithy.api#documentation": "

Enables you to programmatically delete an Amazon Web Services Supply Chain instance by deleting the KMS keys and relevant information associated with the API without using the Amazon Web Services console.

\n

This is an asynchronous operation. Upon receiving a DeleteInstance request, Amazon Web Services Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during \n the instance creation process. You can use the GetInstance action to check the instance status.

", + "smithy.api#documentation": "

Enables you to programmatically delete an Amazon Web Services Supply Chain instance by deleting the KMS keys and relevant information associated with the API without using the Amazon Web Services console.

\n

This is an asynchronous operation. Upon receiving a DeleteInstance request, Amazon Web Services Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status.

", "smithy.api#examples": [ { "title": "Successful DeleteInstance request", @@ -3808,6 +3820,9 @@ "traits": { "aws.api#arn": { "template": "instance/{instanceId}" + }, + "aws.cloudformation#cfnResource": { + "name": "Instance" } } }, @@ -3867,7 +3882,7 @@ "com.amazonaws.supplychain#InstanceWebAppDnsDomain": { "type": "string", "traits": { - "smithy.api#pattern": "^[A-Za-z0-9]+(.[A-Za-z0-9]+)+$" + "smithy.api#pattern": "^(?![-])[a-zA-Z0-9-]{1,62}[a-zA-Z0-9]$" } }, "com.amazonaws.supplychain#InternalServerException": { @@ -4363,7 +4378,7 @@ "namespace": { "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace", "traits": { - "smithy.api#documentation": "
@@ -4363,7 +4378,7 @@ "namespace": { "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace", "traits": { - "smithy.api#documentation": "
    "com.amazonaws.supplychain#InternalServerException": {
@@ -4363,7 +4378,7 @@
      "namespace": {
        "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace",
        "traits": {
-          "smithy.api#documentation": "The namespace of the dataset. The available values are:\n ",
+          "smithy.api#documentation": "The name space of the dataset. The available values are:\n ",
          "smithy.api#httpLabel": {},
          "smithy.api#required": {}
        }
@@ -4744,7 +4759,7 @@
        }
      ],
      "traits": {
-        "smithy.api#documentation": "Send the transactional data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in data lake. \n New data events are synced with data lake at 5 PM GMT everyday. The updated transactional data is available in data lake after ingestion.",
+        "smithy.api#documentation": "Send the transactional data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in data lake. New data events are synced with data lake at 5 PM GMT every day. The updated transactional data is available in data lake after ingestion.",
        "smithy.api#examples": [
          {
            "title": "Successful SendDataIntegrationEvent for inboundorder event type",
@@ -4971,7 +4986,7 @@
        "data": {
          "target": "com.amazonaws.supplychain#DataIntegrationEventData",
          "traits": {
-            "smithy.api#documentation": "The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain\n .",
+            "smithy.api#documentation": "The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain.",
            "smithy.api#required": {}
          }
        },
@@ -5093,7 +5108,7 @@
        }
      ],
      "traits": {
-        "smithy.api#documentation": "You can create tags during or after creating a resource such as instance, data flow, or dataset in AWS Supply chain. During the data ingestion process, you can add tags such as dev, test, or prod to data flows \n created during the data ingestion process in the AWS Supply Chain datasets. You can use these tags to identify a group of resources or a single resource used by the developer.",
+        "smithy.api#documentation": "You can create tags during or after creating a resource such as instance, data flow, or dataset in AWS Supply Chain. During the data ingestion process, you can add tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets. You can use these tags to identify a group of resources or a single resource used by the developer.",
        "smithy.api#examples": [
          {
            "title": "Successful TagResource",
@@ -5206,7 +5221,7 @@
        }
      ],
      "traits": {
-        "smithy.api#documentation": "You can delete tags for an Amazon Web Services Supply chain resource such as instance, data flow, or dataset in AWS Supply Chain. During the data ingestion process, you can delete tags such as dev, test, or prod to data flows \n created during the data ingestion process in the AWS Supply Chain datasets.",
+        "smithy.api#documentation": "You can delete tags for an Amazon Web Services Supply Chain resource such as instance, data flow, or dataset in AWS Supply Chain. During the data ingestion process, you can delete tags such as dev, test, or prod from data flows created during the data ingestion process in the AWS Supply Chain datasets.",
        "smithy.api#examples": [
          {
            "title": "Successful UntagResource",

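Since the SendDataIntegrationEvent documentation above describes the ingestion flow (events are queued, then synced with the data lake at 5 PM GMT), a sketch of the call from Soto may help. The payload fields, the Soto enum spelling of the "scn.data.inboundorder" event type, and the instance ID are assumptions; check the generated SotoSupplyChain module for exact member names:

```swift
import SotoCore
import SotoSupplyChain

// Hedged sketch of sending one inbound-order event (Soto 7-style client setup).
let client = AWSClient()
let supplyChain = SupplyChain(client: client, region: .useast1)

let response = try await supplyChain.sendDataIntegrationEvent(
    data: #"{"po_id": "po-123", "po_line_id": "1", "order_date": "2024-11-02"}"#,
    eventGroupId: "inboundOrderGroup",
    eventType: .scnDataInboundorder,  // assumed spelling of "scn.data.inboundorder"
    instanceId: "11111111-2222-3333-4444-555555555555"
)
print("queued event:", response.eventId)
try await client.shutdown()
```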
diff --git a/models/transcribe.json b/models/transcribe.json
index 9446a7bc14..a04f3d22dc 100644
--- a/models/transcribe.json
+++ b/models/transcribe.json
@@ -247,6 +247,12 @@
        "traits": {
          "smithy.api#documentation": "Indicates which speaker is on which channel."
        }
+      },
+      "Tags": {
+        "target": "com.amazonaws.transcribe#TagList",
+        "traits": {
+          "smithy.api#documentation": "The tags, each in the form of a key:value pair, assigned to the specified\n call analytics job."
+        }
      }
    },
    "traits": {
@@ -507,6 +513,12 @@
          "smithy.api#documentation": "The date and time the specified Call Analytics category was last updated.\n Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For\n example, 2022-05-05T12:45:32.691000-07:00 represents 12:45 PM UTC-7 on May\n 5, 2022."
        }
      },
+      "Tags": {
+        "target": "com.amazonaws.transcribe#TagList",
+        "traits": {
+          "smithy.api#documentation": "The tags, each in the form of a key:value pair, assigned to the specified\n call analytics category."
+        }
+      },
      "InputType": {
        "target": "com.amazonaws.transcribe#InputType",
        "traits": {
@@ -657,6 +669,12 @@
          "smithy.api#required": {}
        }
      },
+      "Tags": {
+        "target": "com.amazonaws.transcribe#TagList",
+        "traits": {
+          "smithy.api#documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new\n call analytics category at the time you start this new job.\n To learn more about using tags with Amazon Transcribe, refer to Tagging\n resources."
+        }
+      },
      "InputType": {
        "target": "com.amazonaws.transcribe#InputType",
        "traits": {
@@ -5275,6 +5293,12 @@
          "smithy.api#documentation": "Specify additional optional settings in your request, including content redaction; allows you to apply custom language models,\n vocabulary filters, and custom vocabularies to your Call Analytics job."
        }
      },
+      "Tags": {
+        "target": "com.amazonaws.transcribe#TagList",
+        "traits": {
+          "smithy.api#documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new\n call analytics job at the time you start this new job.\n To learn more about using tags with Amazon Transcribe, refer to Tagging\n resources."
+        }
+      },
      "ChannelDefinitions": {
        "target": "com.amazonaws.transcribe#ChannelDefinitions",
        "traits": {

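The hunks above all add the same optional TagList member, so Call Analytics jobs and categories can now carry tags from creation time onward. A hedged sketch of what that looks like through Soto's generated convenience method; the job name, bucket, and role ARN are placeholders:

```swift
import SotoCore
import SotoTranscribe

// Sketch: attach the newly added Tags when starting a Call Analytics job.
let client = AWSClient()
let transcribe = Transcribe(client: client, region: .useast1)

let job = try await transcribe.startCallAnalyticsJob(
    callAnalyticsJobName: "support-call-2024-11-02",
    dataAccessRoleArn: "arn:aws:iam::111122223333:role/TranscribeAccess",
    media: .init(mediaFileUri: "s3://amzn-s3-demo-bucket/calls/call.wav"),
    tags: [.init(key: "Department", value: "Support")]
)
print(job.callAnalyticsJob?.callAnalyticsJobStatus?.rawValue ?? "queued")
try await client.shutdown()
```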
diff --git a/models/transfer.json b/models/transfer.json
index fec686b645..18d7db616b 100644
--- a/models/transfer.json
+++ b/models/transfer.json
@@ -180,6 +180,12 @@
        "traits": {
          "smithy.api#documentation": "Provides Basic authentication support to the AS2 Connectors API. To use Basic authentication,\n you must provide the name or Amazon Resource Name (ARN) of a secret in Secrets Manager.\n The default value for this parameter is null, which indicates that Basic authentication is not enabled for the connector.\n If the connector should use Basic authentication, the secret needs to be in the following format:\n {\n \"Username\": \"user-name\",\n \"Password\": \"user-password\"\n }\n Replace user-name and user-password with the credentials for the actual user that is being authenticated.\n Note the following:\n - You are storing these credentials in Secrets Manager, not passing them directly into this API.\n - If you are using the API, SDKs, or CloudFormation to configure your connector, then you must create the secret before you can enable Basic authentication.\n However, if you are using the Amazon Web Services management console, you can have the system create the secret for you.\n If you have previously enabled Basic authentication for a connector, you can disable it by using the UpdateConnector API call. For example, if you are using the CLI, you can run the following command to remove Basic authentication:\n update-connector --connector-id my-connector-id --as2-config 'BasicAuthSecretId=\"\"'"
        }

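The secret format and the CLI teardown command are spelled out in the documentation string above; the same round trip through Soto might look like the sketch below. The secret name and connector ID are placeholders, and the convenience-method labels are assumed from Soto's usual codegen:

```swift
import SotoCore
import SotoSecretsManager
import SotoTransfer

// Sketch: store the Basic-auth credentials in Secrets Manager using the exact
// JSON shape required by the connector, then point the connector at the secret.
let client = AWSClient()
let secrets = SecretsManager(client: client, region: .useast1)
let transfer = Transfer(client: client, region: .useast1)

let secret = try await secrets.createSecret(
    name: "as2/partner-basic-auth",
    secretString: #"{"Username": "user-name", "Password": "user-password"}"#
)

// Enable Basic authentication; per the doc above, later sending an empty
// BasicAuthSecretId through UpdateConnector disables it again.
_ = try await transfer.updateConnector(
    as2Config: .init(basicAuthSecretId: secret.arn),
    connectorId: "c-1111aaaa2222bbbb3"
)
try await client.shutdown()
```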
+      },
+      "PreserveContentType": {
+        "target": "com.amazonaws.transfer#PreserveContentType",
+        "traits": {
+          "smithy.api#documentation": "Allows you to use the Amazon S3 Content-Type that is associated with objects in S3 instead of\n having the content type mapped based on the file extension. This parameter is enabled by default when you create an AS2 connector\n from the console, but disabled by default when you create an AS2 connector by calling the API directly."
+        }
      }
    },
    "traits": {
@@ -708,7 +714,7 @@
          "iam:PassRole"
        ]
      },
-      "smithy.api#documentation": "Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership,\n between an Transfer Family server and an AS2 process. The agreement defines the file and message\n transfer relationship between the server and the AS2 process. To define an agreement, Transfer Family\n combines a server, local profile, partner profile, certificate, and other\n attributes.\n The partner is identified with the PartnerProfileId, and the AS2 process is identified with the LocalProfileId."
+      "smithy.api#documentation": "Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership,\n between a Transfer Family server and an AS2 process. The agreement defines the file and message\n transfer relationship between the server and the AS2 process. To define an agreement, Transfer Family\n combines a server, local profile, partner profile, certificate, and other\n attributes.\n The partner is identified with the PartnerProfileId, and the AS2 process is identified with the LocalProfileId.\n Specify either\n BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail."
    }
  },
  "com.amazonaws.transfer#CreateAgreementRequest": {
@@ -744,8 +750,7 @@
      "BaseDirectory": {
        "target": "com.amazonaws.transfer#HomeDirectory",
        "traits": {
-          "smithy.api#documentation": "The landing directory (folder) for files transferred by using the AS2 protocol.\n A BaseDirectory example is\n /amzn-s3-demo-bucket/home/mydirectory.",
-          "smithy.api#required": {}
+          "smithy.api#documentation": "The landing directory (folder) for files transferred by using the AS2 protocol.\n A BaseDirectory example is\n /amzn-s3-demo-bucket/home/mydirectory."
        }
      },
      "AccessRole": {
@@ -766,6 +771,24 @@
        "traits": {
          "smithy.api#documentation": "Key-value pairs that can be used to group and search for agreements."
        }
+      },
+      "PreserveFilename": {
+        "target": "com.amazonaws.transfer#PreserveFilenameType",
+        "traits": {
+          "smithy.api#documentation": "Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload\n filename when saving it.\n ENABLED: the filename provided by your trading partner is preserved when the file is saved.\n DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as\n described in File names and locations."
+        }
+      },
+      "EnforceMessageSigning": {
+        "target": "com.amazonaws.transfer#EnforceMessageSigningType",
+        "traits": {
+          "smithy.api#documentation": "Determines whether or not unsigned messages from your trading partners will be accepted.\n ENABLED: Transfer Family rejects unsigned messages from your trading partner.\n DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner."
+        }
+      },
+      "CustomDirectories": {
+        "target": "com.amazonaws.transfer#CustomDirectoriesType",
+        "traits": {
+          "smithy.api#documentation": "A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files: failed files, MDN files, payload files, status files, and temporary files."
+        }
+      }
    },
    "traits": {
@@ -1469,6 +1492,49 @@
      "smithy.api#output": {}
    }
  },
+  "com.amazonaws.transfer#CustomDirectoriesType": {
+    "type": "structure",
+    "members": {
+      "FailedFilesDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { "smithy.api#documentation": "Specifies a location to store failed AS2 message files.", "smithy.api#required": {} } },
+      "MdnFilesDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { "smithy.api#documentation": "Specifies a location to store MDN files.", "smithy.api#required": {} } },
+      "PayloadFilesDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { "smithy.api#documentation": "Specifies a location to store the payload for AS2 message files.", "smithy.api#required": {} } },
+      "StatusFilesDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { "smithy.api#documentation": "Specifies a location to store AS2 status messages.", "smithy.api#required": {} } },
+      "TemporaryFilesDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { "smithy.api#documentation": "Specifies a location to store temporary AS2 message files.", "smithy.api#required": {} } }
+    },
+    "traits": {
+      "smithy.api#documentation": "Contains Amazon S3 locations for storing specific types of AS2 message files."
+    }
+  },

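With CustomDirectoriesType in place, an agreement can route failed, MDN, payload, status, and temporary files to distinct prefixes instead of a single landing directory. A hedged Soto sketch follows; IDs and paths are placeholders, and BaseDirectory is deliberately omitted because the operation doc above says the two options are mutually exclusive:

```swift
import SotoCore
import SotoTransfer

// Sketch: create an agreement using the new CustomDirectories member.
let client = AWSClient()
let transfer = Transfer(client: client, region: .useast1)

let agreement = try await transfer.createAgreement(
    accessRole: "arn:aws:iam::111122223333:role/TransferAccess",
    customDirectories: .init(
        failedFilesDirectory: "/amzn-s3-demo-bucket/as2/failed",
        mdnFilesDirectory: "/amzn-s3-demo-bucket/as2/mdn",
        payloadFilesDirectory: "/amzn-s3-demo-bucket/as2/payload",
        statusFilesDirectory: "/amzn-s3-demo-bucket/as2/status",
        temporaryFilesDirectory: "/amzn-s3-demo-bucket/as2/tmp"
    ),
    enforceMessageSigning: .enabled,   // reject unsigned AS2 messages
    localProfileId: "p-1111aaaa2222bbbb3",
    partnerProfileId: "p-3333cccc4444dddd5",
    preserveFilename: .enabled,        // keep the partner-supplied filename
    serverId: "s-1111aaaa2222bbbb3"
)
print("created agreement:", agreement.agreementId)
try await client.shutdown()
```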
  "com.amazonaws.transfer#CustomStepDetails": {
    "type": "structure",
    "members": {
@@ -3218,6 +3284,24 @@
        "traits": {
          "smithy.api#documentation": "Key-value pairs that can be used to group and search for agreements."
        }
+      },
+      "PreserveFilename": {
+        "target": "com.amazonaws.transfer#PreserveFilenameType",
+        "traits": {
+          "smithy.api#documentation": "Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload\n filename when saving it.\n ENABLED: the filename provided by your trading partner is preserved when the file is saved.\n DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as\n described in File names and locations."
+        }
+      },
+      "EnforceMessageSigning": {
+        "target": "com.amazonaws.transfer#EnforceMessageSigningType",
+        "traits": {
+          "smithy.api#documentation": "Determines whether or not unsigned messages from your trading partners will be accepted.\n ENABLED: Transfer Family rejects unsigned messages from your trading partner.\n DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner."
+        }
+      },
+      "CustomDirectories": {
+        "target": "com.amazonaws.transfer#CustomDirectoriesType",
+        "traits": {
+          "smithy.api#documentation": "A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files: failed files, MDN files, payload files, status files, and temporary files."
+        }
+      }
    }
  },
@@ -3250,7 +3334,7 @@
      "Status": {
        "target": "com.amazonaws.transfer#CertificateStatusType",
        "traits": {
-          "smithy.api#documentation": "The certificate can be either ACTIVE, PENDING_ROTATION, or\n INACTIVE. PENDING_ROTATION means that this certificate will\n replace the current certificate when it expires."
+          "smithy.api#documentation": "Currently, the only available status is ACTIVE: all other values are reserved for future use."
        }
      },
      "Certificate": {
@@ -4227,6 +4311,23 @@
      }
    }
  },
+  "com.amazonaws.transfer#EnforceMessageSigningType": {
+    "type": "enum",
+    "members": {
+      "ENABLED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "ENABLED" } },
+      "DISABLED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "DISABLED" } }
+    }
+  },
  "com.amazonaws.transfer#ExecutionError": {
    "type": "structure",
    "members": {
@@ -7017,6 +7118,40 @@
      "smithy.api#pattern": "^[\\x09-\\x0D\\x20-\\x7E]*$"
    }
  },
+  "com.amazonaws.transfer#PreserveContentType": {
+    "type": "enum",
+    "members": {
+      "ENABLED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "ENABLED" } },
+      "DISABLED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "DISABLED" } }
+    }
+  },
+  "com.amazonaws.transfer#PreserveFilenameType": {
+    "type": "enum",
+    "members": {
+      "ENABLED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "ENABLED" } },
+      "DISABLED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "DISABLED" } }
+    }
+  },
  "com.amazonaws.transfer#PrivateKeyType": {
    "type": "string",
    "traits": {
@@ -9933,7 +10068,7 @@
          "iam:PassRole"
        ]
      },
-      "smithy.api#documentation": "Updates some of the parameters for an existing agreement. Provide the\n AgreementId and the ServerId for the agreement that you want to\n update, along with the new values for the parameters to update."
+      "smithy.api#documentation": "Updates some of the parameters for an existing agreement. Provide the\n AgreementId and the ServerId for the agreement that you want to\n update, along with the new values for the parameters to update.\n Specify either\n BaseDirectory or CustomDirectories, but not both. Specifying both causes the command to fail.\n If you update an agreement from using base directory to custom directories, the base directory is no longer used. Similarly, if you change from custom directories to a base directory, the custom directories are no longer used."
    }
  },

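The follow-up note above matters operationally: switching an agreement between BaseDirectory and CustomDirectories silently retires the old setting. A short hedged sketch of flipping the two new toggles on an existing agreement (IDs are placeholders):

```swift
import SotoCore
import SotoTransfer

// Sketch: update an existing agreement without touching its directories.
let client = AWSClient()
let transfer = Transfer(client: client, region: .useast1)

_ = try await transfer.updateAgreement(
    agreementId: "a-1111aaaa2222bbbb3",
    enforceMessageSigning: .enabled,   // start rejecting unsigned AS2 messages
    preserveFilename: .enabled,        // stop appending the unique suffix
    serverId: "s-1111aaaa2222bbbb3"
)
try await client.shutdown()
```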
  "com.amazonaws.transfer#UpdateAgreementRequest": {
@@ -9988,6 +10123,24 @@
        "traits": {
          "smithy.api#documentation": "Connectors are used to send files using either the AS2 or SFTP protocol. For the access role,\n provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.\n For AS2 connectors\n With AS2, you can send files by calling StartFileTransfer and specifying the\n file paths in the request parameter, SendFilePaths. We use the file’s parent\n directory (for example, for --send-file-paths /bucket/dir/file.txt, parent\n directory is /bucket/dir/) to temporarily store a processed AS2 message file,\n store the MDN when we receive them from the partner, and write a final JSON file containing\n relevant metadata of the transmission. So, the AccessRole needs to provide read\n and write access to the parent directory of the file location used in the\n StartFileTransfer request. Additionally, you need to provide read and write\n access to the parent directory of the files that you intend to send with\n StartFileTransfer.\n If you are using Basic authentication for your AS2 connector, the access role requires the\n secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using\n a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also\n needs the kms:Decrypt permission for that key.\n For SFTP connectors\n Make sure that the access role provides\n read and write access to the parent directory of the file location\n that's used in the StartFileTransfer request.\n Additionally, make sure that the role provides\n secretsmanager:GetSecretValue permission to Secrets Manager."
        }
+      },
+      "PreserveFilename": {
+        "target": "com.amazonaws.transfer#PreserveFilenameType",
+        "traits": {
+          "smithy.api#documentation": "Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload\n filename when saving it.\n ENABLED: the filename provided by your trading partner is preserved when the file is saved.\n DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as\n described in File names and locations."
+        }
+      },
+      "EnforceMessageSigning": {
+        "target": "com.amazonaws.transfer#EnforceMessageSigningType",
+        "traits": {
+          "smithy.api#documentation": "Determines whether or not unsigned messages from your trading partners will be accepted.\n ENABLED: Transfer Family rejects unsigned messages from your trading partner.\n DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner."
+        }
+      },
+      "CustomDirectories": {
+        "target": "com.amazonaws.transfer#CustomDirectoriesType",
+        "traits": {
+          "smithy.api#documentation": "A CustomDirectoriesType structure. This structure specifies custom directories for storing various AS2 message files. You can specify directories for the following types of files: failed files, MDN files, payload files, status files, and temporary files."
+        }
+      }
    }
  },
  "traits": {

diff --git a/models/workspaces-thin-client.json b/models/workspaces-thin-client.json
index aa00d2b127..fc9a725cde 100644
--- a/models/workspaces-thin-client.json
+++ b/models/workspaces-thin-client.json
@@ -144,7 +144,7 @@
      "desktopArn": {
        "target": "com.amazonaws.workspacesthinclient#Arn",
        "traits": {
-          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces,\n WorkSpaces Web, or AppStream 2.0.",
+          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces,\n WorkSpaces Secure Browser, or AppStream 2.0.",
          "smithy.api#required": {}
        }
      },
@@ -894,7 +894,7 @@
      "desktopArn": {
        "target": "com.amazonaws.workspacesthinclient#Arn",
        "traits": {
-          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces,\n WorkSpaces Web, or AppStream 2.0."
+          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces,\n WorkSpaces Secure Browser, or AppStream 2.0."
        }
      },
      "desktopEndpoint": {
@@ -1064,7 +1064,7 @@
      "desktopArn": {
        "target": "com.amazonaws.workspacesthinclient#Arn",
        "traits": {
-          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces,\n WorkSpaces Web, or AppStream 2.0."
+          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces,\n WorkSpaces Secure Browser, or AppStream 2.0."
        }
      },
      "desktopEndpoint": {
@@ -1710,7 +1710,8 @@
      "type": {
        "target": "com.amazonaws.workspacesthinclient#MaintenanceWindowType",
        "traits": {
-          "smithy.api#documentation": "An option to select the default or custom maintenance window."
+          "smithy.api#documentation": "An option to select the default or custom maintenance window.",
+          "smithy.api#required": {}
        }
      },

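Making type required is a small breaking change for thin-client environments: requests that previously omitted it must now state whether the window is system- or custom-managed. A sketch of constructing the shape under that constraint; the enum and member spellings are assumptions based on the MaintenanceWindowType values:

```swift
import SotoWorkSpacesThinClient

// Sketch: "type" is now non-optional on the generated MaintenanceWindow shape.
// Pass the result via CreateEnvironment/UpdateEnvironment's maintenance window
// parameter; case names here are assumed from Soto's usual codegen.
let window = WorkSpacesThinClient.MaintenanceWindow(
    daysOfTheWeek: [.monday],
    endTimeHour: 4,
    startTimeHour: 2,
    type: .custom   // was previously omittable; now required
)
```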
      "startTimeHour": {
@@ -3183,7 +3184,7 @@
      "desktopArn": {
        "target": "com.amazonaws.workspacesthinclient#Arn",
        "traits": {
-          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces,\n WorkSpaces Web, or AppStream 2.0."
+          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces,\n WorkSpaces Secure Browser, or AppStream 2.0."
        }
      },
      "desktopEndpoint": {
diff --git a/models/workspaces.json b/models/workspaces.json
index 8d64aa31b1..b7fb435053 100644
--- a/models/workspaces.json
+++ b/models/workspaces.json
@@ -29,6 +29,86 @@
      ]
    },
    "shapes": {
+    "com.amazonaws.workspaces#AGAModeForDirectoryEnum": {
+      "type": "enum",
+      "members": {
+        "ENABLED_AUTO": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "ENABLED_AUTO" } },
+        "DISABLED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "DISABLED" } }
+      }
+    },
+    "com.amazonaws.workspaces#AGAModeForWorkSpaceEnum": {
+      "type": "enum",
+      "members": {
+        "ENABLED_AUTO": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "ENABLED_AUTO" } },
+        "DISABLED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "DISABLED" } },
+        "INHERITED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "INHERITED" } }
+      }
+    },
+    "com.amazonaws.workspaces#AGAPreferredProtocolForDirectory": {
+      "type": "enum",
+      "members": {
+        "TCP": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "TCP" } },
+        "NONE": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "NONE" } }
+      }
+    },
+    "com.amazonaws.workspaces#AGAPreferredProtocolForWorkSpace": {
+      "type": "enum",
+      "members": {
+        "TCP": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "TCP" } },
+        "NONE": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "NONE" } },
+        "INHERITED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "INHERITED" } }
+      }
+    },
    "com.amazonaws.workspaces#ARN": {
      "type": "string",
      "traits": {
@@ -1352,6 +1432,18 @@
          "smithy.api#enumValue": "POWERPRO"
        }
      },
+      "GENERALPURPOSE_4XLARGE": {
+        "target": "smithy.api#Unit",
+        "traits": { "smithy.api#enumValue": "GENERALPURPOSE_4XLARGE" }
+      },
+      "GENERALPURPOSE_8XLARGE": {
+        "target": "smithy.api#Unit",
+        "traits": { "smithy.api#enumValue": "GENERALPURPOSE_8XLARGE" }
+      },
      "GRAPHICSPRO": {
        "target": "smithy.api#Unit",
        "traits": {
@@ -5691,6 +5783,48 @@
      "smithy.api#output": {}
    }
  },
+  "com.amazonaws.workspaces#GlobalAcceleratorForDirectory": {
+    "type": "structure",
+    "members": {
+      "Mode": {
+        "target": "com.amazonaws.workspaces#AGAModeForDirectoryEnum",
+        "traits": {
+          "smithy.api#documentation": "Indicates if Global Accelerator for directory is enabled or disabled.",
+          "smithy.api#required": {}
+        }
+      },
+      "PreferredProtocol": {
+        "target": "com.amazonaws.workspaces#AGAPreferredProtocolForDirectory",
+        "traits": {
+          "smithy.api#documentation": "Indicates the preferred protocol for Global Accelerator."
+        }
+      }
+    },
+    "traits": {
+      "smithy.api#documentation": "Describes the Global Accelerator for directory."
+    }
+  },
+  "com.amazonaws.workspaces#GlobalAcceleratorForWorkSpace": {
+    "type": "structure",
+    "members": {
+      "Mode": {
+        "target": "com.amazonaws.workspaces#AGAModeForWorkSpaceEnum",
+        "traits": {
+          "smithy.api#documentation": "Indicates if Global Accelerator for WorkSpaces is enabled, disabled,\n or the same mode as the associated directory.",
+          "smithy.api#required": {}
+        }
+      },
+      "PreferredProtocol": {
+        "target": "com.amazonaws.workspaces#AGAPreferredProtocolForWorkSpace",
+        "traits": {
+          "smithy.api#documentation": "Indicates the preferred protocol for Global Accelerator."
+        }
+      }
+    },
+    "traits": {
+      "smithy.api#documentation": "Describes the Global Accelerator for WorkSpaces."
+    }
+  },

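The workspace-level structure adds an INHERITED mode on top of the directory-level enum, so a single WorkSpace can either defer to or override its directory's Global Accelerator setting. A hedged sketch of overriding it for one WorkSpace; IDs are placeholders and member names assume Soto's usual lowerCamel codegen:

```swift
import SotoCore
import SotoWorkSpaces

// Sketch: opt one WorkSpace into Global Accelerator over TCP while the
// directory default (GlobalAcceleratorForDirectory) governs everything else.
let client = AWSClient()
let workSpaces = WorkSpaces(client: client, region: .useast1)

_ = try await workSpaces.modifyWorkspaceProperties(
    workspaceId: "ws-1234567890",
    workspaceProperties: .init(
        globalAccelerator: .init(mode: .enabledAuto, preferredProtocol: .tcp)
    )
)
try await client.shutdown()
```

Passing `.inherited` instead would hand control back to the directory-level setting.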
  "com.amazonaws.workspaces#IDCConfig": {
    "type": "structure",
    "members": {
@@ -8813,6 +8947,12 @@
        "traits": {
          "smithy.api#documentation": "Indicates the storage connector used."
        }
+      },
+      "GlobalAccelerator": {
+        "target": "com.amazonaws.workspaces#GlobalAcceleratorForDirectory",
+        "traits": {
+          "smithy.api#documentation": "Indicates the Global Accelerator properties."
+        }
+      }
    }
  },
  "traits": {
@@ -11133,6 +11273,12 @@
      "traits": {
        "smithy.api#documentation": "The name of the operating system."
      }
+    },
+    "GlobalAccelerator": {
+      "target": "com.amazonaws.workspaces#GlobalAcceleratorForWorkSpace",
+      "traits": {
+        "smithy.api#documentation": "Indicates the Global Accelerator properties."
+      }
+    }
  }
},
"traits": {