From 0506884a4da91eeb211b0f6345da88702a79db2b Mon Sep 17 00:00:00 2001 From: adam-fowler Date: Mon, 30 Dec 2024 02:49:25 +0000 Subject: [PATCH] Update models from aws-sdk-go-v2 release-2024-12-28 --- .../Services/Amplify/Amplify_shapes.swift | 56 +- .../Services/AppStream/AppStream_shapes.swift | 1 + .../BedrockAgent/BedrockAgent_shapes.swift | 43 +- .../BedrockAgentRuntime_api.swift | 22 +- .../BedrockAgentRuntime_shapes.swift | 134 +- .../BedrockDataAutomation_api.swift | 46 +- .../BedrockDataAutomationRuntime_api.swift | 2 +- .../Soto/Services/Billing/Billing_api.swift | 374 +++- .../Services/Billing/Billing_shapes.swift | 563 ++++- .../CognitoIdentityProvider_api.swift | 39 + .../Soto/Services/Connect/Connect_api.swift | 48 +- .../Services/Connect/Connect_shapes.swift | 107 +- .../ConnectParticipant_api.swift | 110 +- .../ConnectParticipant_shapes.swift | 104 +- .../CostExplorer/CostExplorer_api.swift | 21 + .../CostExplorer/CostExplorer_shapes.swift | 63 +- Sources/Soto/Services/DLM/DLM_api.swift | 2 + .../Soto/Services/DataSync/DataSync_api.swift | 306 ++- .../Services/DataSync/DataSync_shapes.swift | 298 ++- Sources/Soto/Services/DocDB/DocDB_api.swift | 15 + .../Soto/Services/DocDB/DocDB_shapes.swift | 52 +- Sources/Soto/Services/ECR/ECR_api.swift | 84 +- Sources/Soto/Services/ECR/ECR_shapes.swift | 14 +- .../Services/ECRPublic/ECRPublic_api.swift | 7 + Sources/Soto/Services/EKS/EKS_api.swift | 111 + Sources/Soto/Services/EKS/EKS_shapes.swift | 142 +- Sources/Soto/Services/Glue/Glue_api.swift | 47 +- Sources/Soto/Services/Glue/Glue_shapes.swift | 12 +- Sources/Soto/Services/IoT/IoT_api.swift | 35 +- Sources/Soto/Services/IoT/IoT_shapes.swift | 74 +- .../IoTSecureTunneling_api.swift | 48 +- Sources/Soto/Services/MWAA/MWAA_api.swift | 4 +- Sources/Soto/Services/MWAA/MWAA_shapes.swift | 6 +- Sources/Soto/Services/Macie2/Macie2_api.swift | 40 +- .../Soto/Services/Macie2/Macie2_shapes.swift | 43 +- .../MediaConvert/MediaConvert_api.swift | 37 +- 
.../MediaConvert/MediaConvert_shapes.swift | 34 +- .../Services/MediaLive/MediaLive_api.swift | 38 + .../Services/MediaLive/MediaLive_shapes.swift | 127 +- .../NetworkFirewall/NetworkFirewall_api.swift | 2 +- .../Organizations/Organizations_api.swift | 2 + .../Soto/Services/Outposts/Outposts_api.swift | 6 +- .../Services/Outposts/Outposts_shapes.swift | 3 +- .../Services/QConnect/QConnect_shapes.swift | 18 +- .../Services/QuickSight/QuickSight_api.swift | 6 + .../QuickSight/QuickSight_shapes.swift | 67 +- Sources/Soto/Services/RDS/RDS_api.swift | 56 +- Sources/Soto/Services/RDS/RDS_shapes.swift | 66 +- Sources/Soto/Services/Rbin/Rbin_api.swift | 46 + .../Resiliencehub/Resiliencehub_shapes.swift | 69 +- .../Services/SageMaker/SageMaker_api.swift | 200 +- .../Services/SageMaker/SageMaker_shapes.swift | 228 +- .../SecurityHub/SecurityHub_api.swift | 10 +- .../SecurityHub/SecurityHub_shapes.swift | 400 +--- Sources/Soto/Services/SsmSap/SsmSap_api.swift | 3 + .../Soto/Services/SsmSap/SsmSap_shapes.swift | 37 +- .../Soto/Services/Transfer/Transfer_api.swift | 12 + .../Services/Transfer/Transfer_shapes.swift | 56 +- .../WorkSpaces/WorkSpaces_shapes.swift | 74 +- models/amplify.json | 106 +- models/appstream.json | 6 + models/bcm-pricing-calculator.json | 3 + models/bedrock-agent-runtime.json | 159 +- models/bedrock-agent.json | 68 +- models/bedrock-data-automation-runtime.json | 2 +- models/bedrock-data-automation.json | 24 +- models/billing.json | 1527 +++++++++++-- models/budgets.json | 168 +- models/connect.json | 201 +- models/connectparticipant.json | 216 +- models/cost-explorer.json | 73 + models/datasync.json | 472 +++- models/docdb.json | 77 + models/ecr-public.json | 40 +- models/ecr.json | 20 +- models/eks.json | 247 +++ models/endpoints/endpoints.json | 1936 ++++++++++++++--- models/glue.json | 12 +- models/iot.json | 199 +- models/macie2.json | 74 +- models/mediaconvert.json | 98 +- models/medialive.json | 205 ++ models/mwaa.json | 6 +- 
models/network-firewall.json | 2 +- models/outposts.json | 10 +- models/qconnect.json | 14 +- models/quicksight.json | 71 + models/rds.json | 66 +- models/resiliencehub.json | 80 +- models/sagemaker.json | 499 +++-- models/securityhub.json | 232 +- models/ssm-sap.json | 47 + models/transfer.json | 95 +- models/workspaces.json | 134 ++ 94 files changed, 10152 insertions(+), 1707 deletions(-) diff --git a/Sources/Soto/Services/Amplify/Amplify_shapes.swift b/Sources/Soto/Services/Amplify/Amplify_shapes.swift index 3532c02c8c..463a8fe785 100644 --- a/Sources/Soto/Services/Amplify/Amplify_shapes.swift +++ b/Sources/Soto/Services/Amplify/Amplify_shapes.swift @@ -55,6 +55,7 @@ extension Amplify { public enum JobStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case cancelled = "CANCELLED" case cancelling = "CANCELLING" + case created = "CREATED" case failed = "FAILED" case pending = "PENDING" case provisioning = "PROVISIONING" @@ -111,6 +112,15 @@ extension Amplify { public var description: String { return self.rawValue } } + public enum WafStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case associating = "ASSOCIATING" + case associationFailed = "ASSOCIATION_FAILED" + case associationSuccess = "ASSOCIATION_SUCCESS" + case disassociating = "DISASSOCIATING" + case disassociationFailed = "DISASSOCIATION_FAILED" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct App: AWSDecodableShape { @@ -128,7 +138,7 @@ extension Amplify { public let buildSpec: String? /// The cache configuration for the Amplify app. If you don't specify the cache configuration type, Amplify uses the default AMPLIFY_MANAGED setting. public let cacheConfig: CacheConfig? - /// Creates a date and time for the Amplify app. + /// A timestamp of when Amplify created the application. public let createTime: Date /// Describes the custom HTTP headers for the Amplify app. 
public let customHeaders: String? @@ -162,11 +172,15 @@ extension Amplify { public let repositoryCloneMethod: RepositoryCloneMethod? /// The tag for the Amplify app. public let tags: [String: String]? - /// Updates the date and time for the Amplify app. + /// A timestamp of when Amplify updated the application. public let updateTime: Date + /// Describes the Firewall configuration for the Amplify app. Firewall support enables you to protect your hosted applications with a direct integration with WAF. + public let wafConfiguration: WafConfiguration? + /// A timestamp of when Amplify created the webhook in your Git repository. + public let webhookCreateTime: Date? @inlinable - public init(appArn: String, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, cacheConfig: CacheConfig? = nil, createTime: Date, customHeaders: String? = nil, customRules: [CustomRule]? = nil, defaultDomain: String, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool, enableBranchAutoBuild: Bool, enableBranchAutoDeletion: Bool? = nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String, platform: Platform, productionBranch: ProductionBranch? = nil, repository: String? = nil, repositoryCloneMethod: RepositoryCloneMethod? = nil, tags: [String: String]? = nil, updateTime: Date) { + public init(appArn: String, appId: String, autoBranchCreationConfig: AutoBranchCreationConfig? = nil, autoBranchCreationPatterns: [String]? = nil, basicAuthCredentials: String? = nil, buildSpec: String? = nil, cacheConfig: CacheConfig? = nil, createTime: Date, customHeaders: String? = nil, customRules: [CustomRule]? = nil, defaultDomain: String, description: String? = nil, enableAutoBranchCreation: Bool? = nil, enableBasicAuth: Bool, enableBranchAutoBuild: Bool, enableBranchAutoDeletion: Bool? 
= nil, environmentVariables: [String: String]? = nil, iamServiceRoleArn: String? = nil, name: String, platform: Platform, productionBranch: ProductionBranch? = nil, repository: String? = nil, repositoryCloneMethod: RepositoryCloneMethod? = nil, tags: [String: String]? = nil, updateTime: Date, wafConfiguration: WafConfiguration? = nil, webhookCreateTime: Date? = nil) { self.appArn = appArn self.appId = appId self.autoBranchCreationConfig = autoBranchCreationConfig @@ -192,6 +206,8 @@ extension Amplify { self.repositoryCloneMethod = repositoryCloneMethod self.tags = tags self.updateTime = updateTime + self.wafConfiguration = wafConfiguration + self.webhookCreateTime = webhookCreateTime } private enum CodingKeys: String, CodingKey { @@ -220,6 +236,8 @@ extension Amplify { case repositoryCloneMethod = "repositoryCloneMethod" case tags = "tags" case updateTime = "updateTime" + case wafConfiguration = "wafConfiguration" + case webhookCreateTime = "webhookCreateTime" } } @@ -379,7 +397,7 @@ extension Amplify { public let branchName: String /// The build specification (build spec) content for the branch of an Amplify app. public let buildSpec: String? - /// The creation date and time for a branch that is part of an Amplify app. + /// A timestamp of when Amplify created the branch. public let createTime: Date /// The custom domains for a branch of an Amplify app. public let customDomains: [String] @@ -417,7 +435,7 @@ extension Amplify { public let totalNumberOfJobs: String /// The content Time to Live (TTL) for the website in seconds. public let ttl: String - /// The last updated date and time for a branch that is part of an Amplify app. + /// A timestamp for the last updated time for a branch. public let updateTime: Date @inlinable @@ -1850,7 +1868,7 @@ extension Amplify { public let commitId: String /// The commit message from a third-party repository provider for the job. public let commitMessage: String - /// The commit date and time for the job. 
+ /// The commit date and time for the job. public let commitTime: Date /// The end date and time for the job. public let endTime: Date? @@ -3196,14 +3214,36 @@ extension Amplify { } } + public struct WafConfiguration: AWSDecodableShape { + /// The reason for the current status of the Firewall configuration. + public let statusReason: String? + /// The status of the process to associate or disassociate a web ACL to an Amplify app. + public let wafStatus: WafStatus? + /// The Amazon Resource Name (ARN) for the web ACL associated with an Amplify app. + public let webAclArn: String? + + @inlinable + public init(statusReason: String? = nil, wafStatus: WafStatus? = nil, webAclArn: String? = nil) { + self.statusReason = statusReason + self.wafStatus = wafStatus + self.webAclArn = webAclArn + } + + private enum CodingKeys: String, CodingKey { + case statusReason = "statusReason" + case wafStatus = "wafStatus" + case webAclArn = "webAclArn" + } + } + public struct Webhook: AWSDecodableShape { /// The name for a branch that is part of an Amplify app. public let branchName: String - /// The create date and time for a webhook. + /// A timestamp of when Amplify created the webhook in your Git repository. public let createTime: Date /// The description for a webhook. public let description: String - /// Updates the date and time for a webhook. + /// A timestamp of when Amplify updated the webhook in your Git repository. public let updateTime: Date /// The Amazon Resource Name (ARN) for the webhook. 
public let webhookArn: String diff --git a/Sources/Soto/Services/AppStream/AppStream_shapes.swift b/Sources/Soto/Services/AppStream/AppStream_shapes.swift index 437fb8b0ac..cb33f432d6 100644 --- a/Sources/Soto/Services/AppStream/AppStream_shapes.swift +++ b/Sources/Soto/Services/AppStream/AppStream_shapes.swift @@ -239,6 +239,7 @@ extension AppStream { public enum PlatformType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case amazonLinux2 = "AMAZON_LINUX2" case rhel8 = "RHEL8" + case rockyLinux8 = "ROCKY_LINUX8" case windows = "WINDOWS" case windowsServer2016 = "WINDOWS_SERVER_2016" case windowsServer2019 = "WINDOWS_SERVER_2019" diff --git a/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift b/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift index 80b33a8871..f30d2a3cba 100644 --- a/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift +++ b/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift @@ -359,6 +359,7 @@ extension BedrockAgent { public enum PromptType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case knowledgeBaseResponseGeneration = "KNOWLEDGE_BASE_RESPONSE_GENERATION" + case memorySummarization = "MEMORY_SUMMARIZATION" case orchestration = "ORCHESTRATION" case postProcessing = "POST_PROCESSING" case preProcessing = "PRE_PROCESSING" @@ -414,6 +415,7 @@ extension BedrockAgent { public enum SharePointAuthType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case oauth2ClientCredentials = "OAUTH2_CLIENT_CREDENTIALS" + case oauth2SharepointAppOnlyClientCredentials = "OAUTH2_SHAREPOINT_APP_ONLY_CLIENT_CREDENTIALS" public var description: String { return self.rawValue } } @@ -6963,24 +6965,29 @@ extension BedrockAgent { public struct MemoryConfiguration: AWSEncodableShape & AWSDecodableShape { /// The type of memory that is stored. 
public let enabledMemoryTypes: [MemoryType] + /// Contains the configuration for SESSION_SUMMARY memory type enabled for the agent. + public let sessionSummaryConfiguration: SessionSummaryConfiguration? /// The number of days the agent is configured to retain the conversational context. public let storageDays: Int? @inlinable - public init(enabledMemoryTypes: [MemoryType], storageDays: Int? = nil) { + public init(enabledMemoryTypes: [MemoryType], sessionSummaryConfiguration: SessionSummaryConfiguration? = nil, storageDays: Int? = nil) { self.enabledMemoryTypes = enabledMemoryTypes + self.sessionSummaryConfiguration = sessionSummaryConfiguration self.storageDays = storageDays } public func validate(name: String) throws { try self.validate(self.enabledMemoryTypes, name: "enabledMemoryTypes", parent: name, max: 1) try self.validate(self.enabledMemoryTypes, name: "enabledMemoryTypes", parent: name, min: 1) - try self.validate(self.storageDays, name: "storageDays", parent: name, max: 30) + try self.sessionSummaryConfiguration?.validate(name: "\(name).sessionSummaryConfiguration") + try self.validate(self.storageDays, name: "storageDays", parent: name, max: 365) try self.validate(self.storageDays, name: "storageDays", parent: name, min: 0) } private enum CodingKeys: String, CodingKey { case enabledMemoryTypes = "enabledMemoryTypes" + case sessionSummaryConfiguration = "sessionSummaryConfiguration" case storageDays = "storageDays" } } @@ -8746,6 +8753,24 @@ extension BedrockAgent { } } + public struct SessionSummaryConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Maximum number of recent session summaries to include in the agent's prompt context. + public let maxRecentSessions: Int? + + @inlinable + public init(maxRecentSessions: Int? 
= nil) { + self.maxRecentSessions = maxRecentSessions + } + + public func validate(name: String) throws { + try self.validate(self.maxRecentSessions, name: "maxRecentSessions", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case maxRecentSessions = "maxRecentSessions" + } + } + public struct SharePointCrawlerConfiguration: AWSEncodableShape & AWSDecodableShape { /// The configuration of filtering the SharePoint content. For example, configuring regular expression patterns to include or exclude certain content. public let filterConfiguration: CrawlFilterConfiguration? @@ -10460,13 +10485,16 @@ extension BedrockAgent { public let inclusionFilters: [String]? /// The scope of what is crawled for your URLs. You can choose to crawl only web pages that belong to the same host or primary domain. For example, only web pages that contain the seed URL "https://docs.aws.amazon.com/bedrock/latest/userguide/" and no other domains. You can choose to include sub domains in addition to the host or primary domain. For example, web pages that contain "aws.amazon.com" can also include sub domain "docs.aws.amazon.com". public let scope: WebScopeType? + /// A string used for identifying the crawler or a bot when it accesses a web server. By default, this is set to bedrockbot_UUID for your crawler. You can optionally append a custom string to bedrockbot_UUID to allowlist a specific user agent permitted to access your source URLs. + public let userAgent: String? @inlinable - public init(crawlerLimits: WebCrawlerLimits? = nil, exclusionFilters: [String]? = nil, inclusionFilters: [String]? = nil, scope: WebScopeType? = nil) { + public init(crawlerLimits: WebCrawlerLimits? = nil, exclusionFilters: [String]? = nil, inclusionFilters: [String]? = nil, scope: WebScopeType? = nil, userAgent: String? 
= nil) { self.crawlerLimits = crawlerLimits self.exclusionFilters = exclusionFilters self.inclusionFilters = inclusionFilters self.scope = scope + self.userAgent = userAgent } public func validate(name: String) throws { @@ -10482,6 +10510,8 @@ extension BedrockAgent { } try self.validate(self.inclusionFilters, name: "inclusionFilters", parent: name, max: 25) try self.validate(self.inclusionFilters, name: "inclusionFilters", parent: name, min: 1) + try self.validate(self.userAgent, name: "userAgent", parent: name, max: 40) + try self.validate(self.userAgent, name: "userAgent", parent: name, min: 15) } private enum CodingKeys: String, CodingKey { @@ -10489,19 +10519,24 @@ extension BedrockAgent { case exclusionFilters = "exclusionFilters" case inclusionFilters = "inclusionFilters" case scope = "scope" + case userAgent = "userAgent" } } public struct WebCrawlerLimits: AWSEncodableShape & AWSDecodableShape { + /// The max number of web pages crawled from your source URLs, up to 25,000 pages. If the web pages exceed this limit, the data source sync will fail and no web pages will be ingested. + public let maxPages: Int? /// The max rate at which pages are crawled, up to 300 per minute per host. public let rateLimit: Int? @inlinable - public init(rateLimit: Int? = nil) { + public init(maxPages: Int? = nil, rateLimit: Int? = nil) { + self.maxPages = maxPages self.rateLimit = rateLimit } private enum CodingKeys: String, CodingKey { + case maxPages = "maxPages" case rateLimit = "rateLimit" } } diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift index 2b30053b2f..a42aa4f066 100644 --- a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift @@ -99,18 +99,21 @@ public struct BedrockAgentRuntime: AWSService { /// - agentAliasId: The unique identifier of an alias of an agent. 
/// - agentId: The unique identifier of the agent to which the alias belongs. /// - memoryId: The unique identifier of the memory. + /// - sessionId: The unique session identifier of the memory. /// - logger: Logger use during operation @inlinable public func deleteAgentMemory( agentAliasId: String, agentId: String, memoryId: String? = nil, + sessionId: String? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> DeleteAgentMemoryResponse { let input = DeleteAgentMemoryRequest( agentAliasId: agentAliasId, agentId: agentId, - memoryId: memoryId + memoryId: memoryId, + sessionId: sessionId ) return try await self.deleteAgentMemory(input, logger: logger) } @@ -191,7 +194,7 @@ public struct BedrockAgentRuntime: AWSService { return try await self.getAgentMemory(input, logger: logger) } - /// The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent. Sends a prompt for the agent to process and respond to. Note the following fields for the request: To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement. End a conversation by setting endSession to true. In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group. The response is returned in the bytes field of the chunk object. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. 
If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field. Errors are also surfaced in the response. + /// The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent. Sends a prompt for the agent to process and respond to. Note the following fields for the request: To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement. To stream agent responses, make sure that only orchestration prompt is enabled. Agent streaming is not supported for the following steps: Pre-processing Post-processing Agent with 1 Knowledge base and User Input not enabled End a conversation by setting endSession to true. In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group. The response is returned in the bytes field of the chunk object. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field. Errors are also surfaced in the response. 
@Sendable @inlinable public func invokeAgent(_ input: InvokeAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InvokeAgentResponse { @@ -204,11 +207,12 @@ public struct BedrockAgentRuntime: AWSService { logger: logger ) } - /// The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent. Sends a prompt for the agent to process and respond to. Note the following fields for the request: To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement. End a conversation by setting endSession to true. In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group. The response is returned in the bytes field of the chunk object. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field. Errors are also surfaced in the response. + /// The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent. Sends a prompt for the agent to process and respond to. Note the following fields for the request: To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. 
For more information, see Trace enablement. To stream agent responses, make sure that only orchestration prompt is enabled. Agent streaming is not supported for the following steps: Pre-processing Post-processing Agent with 1 Knowledge base and User Input not enabled End a conversation by setting endSession to true. In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group. The response is returned in the bytes field of the chunk object. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field. Errors are also surfaced in the response. /// /// Parameters: /// - agentAliasId: The alias of the agent to use. /// - agentId: The unique identifier of the agent to use. + /// - bedrockModelConfigurations: Model performance settings for the request. /// - enableTrace: Specifies whether to turn on the trace or not to track the agent's reasoning process. For more information, see Trace enablement. /// - endSession: Specifies whether to end the session with the agent or not. /// - inputText: The prompt text to send the agent. If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored. @@ -216,12 +220,13 @@ public struct BedrockAgentRuntime: AWSService { /// - sessionId: The unique identifier of the session. Use the same value across requests to continue the same conversation. /// - sessionState: Contains parameters that specify various attributes of the session. For more information, see Control session context. 
If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored. /// - sourceArn: The ARN of the resource making the request. - /// - streamingConfigurations: Specifies the configurations for streaming. + /// - streamingConfigurations: Specifies the configurations for streaming. To use agent streaming, you need permissions to perform the bedrock:InvokeModelWithResponseStream action. /// - logger: Logger use during operation @inlinable public func invokeAgent( agentAliasId: String, agentId: String, + bedrockModelConfigurations: BedrockModelConfigurations? = nil, enableTrace: Bool? = nil, endSession: Bool? = nil, inputText: String? = nil, @@ -235,6 +240,7 @@ public struct BedrockAgentRuntime: AWSService { let input = InvokeAgentRequest( agentAliasId: agentAliasId, agentId: agentId, + bedrockModelConfigurations: bedrockModelConfigurations, enableTrace: enableTrace, endSession: endSession, inputText: inputText, @@ -267,6 +273,7 @@ public struct BedrockAgentRuntime: AWSService { /// - flowAliasIdentifier: The unique identifier of the flow alias. /// - flowIdentifier: The unique identifier of the flow. /// - inputs: A list of objects, each containing information about an input into the flow. + /// - modelPerformanceConfiguration: Model performance settings for the request. /// - logger: Logger use during operation @inlinable public func invokeFlow( @@ -274,13 +281,15 @@ public struct BedrockAgentRuntime: AWSService { flowAliasIdentifier: String, flowIdentifier: String, inputs: [FlowInput], + modelPerformanceConfiguration: ModelPerformanceConfiguration? 
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> InvokeFlowResponse { let input = InvokeFlowRequest( enableTrace: enableTrace, flowAliasIdentifier: flowAliasIdentifier, flowIdentifier: flowIdentifier, - inputs: inputs + inputs: inputs, + modelPerformanceConfiguration: modelPerformanceConfiguration ) return try await self.invokeFlow(input, logger: logger) } @@ -302,6 +311,7 @@ public struct BedrockAgentRuntime: AWSService { /// /// Parameters: /// - actionGroups: A list of action groups with each action group defining the action the inline agent needs to carry out. + /// - bedrockModelConfigurations: Model settings for the request. /// - customerEncryptionKeyArn: The Amazon Resource Name (ARN) of the Amazon Web Services KMS key to use to encrypt your inline agent. /// - enableTrace: Specifies whether to turn on the trace or not to track the agent's reasoning process. For more information, see Using trace. /// - endSession: Specifies whether to end the session with the inline agent or not. @@ -318,6 +328,7 @@ public struct BedrockAgentRuntime: AWSService { @inlinable public func invokeInlineAgent( actionGroups: [AgentActionGroup]? = nil, + bedrockModelConfigurations: InlineBedrockModelConfigurations? = nil, customerEncryptionKeyArn: String? = nil, enableTrace: Bool? = nil, endSession: Bool? 
= nil, @@ -334,6 +345,7 @@ public struct BedrockAgentRuntime: AWSService { ) async throws -> InvokeInlineAgentResponse { let input = InvokeInlineAgentRequest( actionGroups: actionGroups, + bedrockModelConfigurations: bedrockModelConfigurations, customerEncryptionKeyArn: customerEncryptionKeyArn, enableTrace: enableTrace, endSession: endSession, diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift index 89753a0e36..a569d58d83 100644 --- a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift @@ -245,6 +245,12 @@ extension BedrockAgentRuntime { public var description: String { return self.rawValue } } + public enum PerformanceConfigLatency: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case optimized = "optimized" + case standard = "standard" + public var description: String { return self.rawValue } + } + public enum PromptState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" @@ -963,6 +969,8 @@ extension BedrockAgentRuntime { case files(FilePart) /// An internal server error occurred. Retry your request. case internalServerException(InternalServerException) + /// The model specified in the request is not ready to serve Inference requests. The AWS SDK will automatically retry the operation up to 5 times. For information about configuring automatic retries, see Retry behavior in the AWS SDKs and Tools reference guide. + case modelNotReadyException(ModelNotReadyException) /// The specified resource Amazon Resource Name (ARN) was not found. Check the Amazon Resource Name (ARN) and try your request again. 
case resourceNotFoundException(ResourceNotFoundException) /// Contains the parameters and information that the agent elicited from the customer to carry out an action. This information is returned to the system and can be used in your own setup for fulfilling the action. @@ -1007,6 +1015,9 @@ extension BedrockAgentRuntime { case .internalServerException: let value = try container.decode(InternalServerException.self, forKey: .internalServerException) self = .internalServerException(value) + case .modelNotReadyException: + let value = try container.decode(ModelNotReadyException.self, forKey: .modelNotReadyException) + self = .modelNotReadyException(value) case .resourceNotFoundException: let value = try container.decode(ResourceNotFoundException.self, forKey: .resourceNotFoundException) self = .resourceNotFoundException(value) @@ -1036,6 +1047,7 @@ extension BedrockAgentRuntime { case dependencyFailedException = "dependencyFailedException" case files = "files" case internalServerException = "internalServerException" + case modelNotReadyException = "modelNotReadyException" case resourceNotFoundException = "resourceNotFoundException" case returnControl = "returnControl" case serviceQuotaExceededException = "serviceQuotaExceededException" @@ -1721,6 +1733,20 @@ extension BedrockAgentRuntime { } } + public struct BedrockModelConfigurations: AWSEncodableShape { + /// The performance configuration for the model. + public let performanceConfig: PerformanceConfiguration? + + @inlinable + public init(performanceConfig: PerformanceConfiguration? = nil) { + self.performanceConfig = performanceConfig + } + + private enum CodingKeys: String, CodingKey { + case performanceConfig = "performanceConfig" + } + } + public struct BedrockRerankingConfiguration: AWSEncodableShape { /// Contains configurations for a reranker model. 
public let modelConfiguration: BedrockRerankingModelConfiguration @@ -1980,12 +2006,15 @@ extension BedrockAgentRuntime { public let agentId: String /// The unique identifier of the memory. public let memoryId: String? + /// The unique session identifier of the memory. + public let sessionId: String? @inlinable - public init(agentAliasId: String, agentId: String, memoryId: String? = nil) { + public init(agentAliasId: String, agentId: String, memoryId: String? = nil, sessionId: String? = nil) { self.agentAliasId = agentAliasId self.agentId = agentId self.memoryId = memoryId + self.sessionId = sessionId } public func encode(to encoder: Encoder) throws { @@ -1994,6 +2023,7 @@ extension BedrockAgentRuntime { request.encodePath(self.agentAliasId, key: "agentAliasId") request.encodePath(self.agentId, key: "agentId") request.encodeQuery(self.memoryId, key: "memoryId") + request.encodeQuery(self.sessionId, key: "sessionId") } public func validate(name: String) throws { @@ -2004,6 +2034,9 @@ extension BedrockAgentRuntime { try self.validate(self.memoryId, name: "memoryId", parent: name, max: 100) try self.validate(self.memoryId, name: "memoryId", parent: name, min: 2) try self.validate(self.memoryId, name: "memoryId", parent: name, pattern: "^[0-9a-zA-Z._:-]+$") + try self.validate(self.sessionId, name: "sessionId", parent: name, max: 100) + try self.validate(self.sessionId, name: "sessionId", parent: name, min: 2) + try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[0-9a-zA-Z._:-]+$") } private enum CodingKeys: CodingKey {} @@ -2064,14 +2097,17 @@ extension BedrockAgentRuntime { public let guardrailConfiguration: GuardrailConfiguration? /// Configuration settings for inference when using RetrieveAndGenerate to generate responses while using an external source. public let inferenceConfig: InferenceConfig? + /// The latency configuration for the model. + public let performanceConfig: PerformanceConfiguration? 
/// Contain the textPromptTemplate string for the external source wrapper object. public let promptTemplate: PromptTemplate? @inlinable - public init(additionalModelRequestFields: [String: String]? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, inferenceConfig: InferenceConfig? = nil, promptTemplate: PromptTemplate? = nil) { + public init(additionalModelRequestFields: [String: String]? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, inferenceConfig: InferenceConfig? = nil, performanceConfig: PerformanceConfiguration? = nil, promptTemplate: PromptTemplate? = nil) { self.additionalModelRequestFields = additionalModelRequestFields self.guardrailConfiguration = guardrailConfiguration self.inferenceConfig = inferenceConfig + self.performanceConfig = performanceConfig self.promptTemplate = promptTemplate } @@ -2088,6 +2124,7 @@ extension BedrockAgentRuntime { case additionalModelRequestFields = "additionalModelRequestFields" case guardrailConfiguration = "guardrailConfiguration" case inferenceConfig = "inferenceConfig" + case performanceConfig = "performanceConfig" case promptTemplate = "promptTemplate" } } @@ -2633,14 +2670,17 @@ extension BedrockAgentRuntime { public let guardrailConfiguration: GuardrailConfiguration? /// Configuration settings for inference when using RetrieveAndGenerate to generate responses while using a knowledge base as a source. public let inferenceConfig: InferenceConfig? + /// The latency configuration for the model. + public let performanceConfig: PerformanceConfiguration? /// Contains the template for the prompt that's sent to the model for response generation. Generation prompts must include the $search_results$ variable. For more information, see Use placeholder variables in the user guide. public let promptTemplate: PromptTemplate? @inlinable - public init(additionalModelRequestFields: [String: String]? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, inferenceConfig: InferenceConfig? 
= nil, promptTemplate: PromptTemplate? = nil) { + public init(additionalModelRequestFields: [String: String]? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, inferenceConfig: InferenceConfig? = nil, performanceConfig: PerformanceConfiguration? = nil, promptTemplate: PromptTemplate? = nil) { self.additionalModelRequestFields = additionalModelRequestFields self.guardrailConfiguration = guardrailConfiguration self.inferenceConfig = inferenceConfig + self.performanceConfig = performanceConfig self.promptTemplate = promptTemplate } @@ -2657,6 +2697,7 @@ extension BedrockAgentRuntime { case additionalModelRequestFields = "additionalModelRequestFields" case guardrailConfiguration = "guardrailConfiguration" case inferenceConfig = "inferenceConfig" + case performanceConfig = "performanceConfig" case promptTemplate = "promptTemplate" } } @@ -3192,6 +3233,20 @@ extension BedrockAgentRuntime { } } + public struct InlineBedrockModelConfigurations: AWSEncodableShape { + /// The latency configuration for the model. + public let performanceConfig: PerformanceConfiguration? + + @inlinable + public init(performanceConfig: PerformanceConfiguration? = nil) { + self.performanceConfig = performanceConfig + } + + private enum CodingKeys: String, CodingKey { + case performanceConfig = "performanceConfig" + } + } + public struct InlineSessionState: AWSEncodableShape { /// Contains information about the files used by code interpreter. public let files: [InputFile]? @@ -3308,6 +3363,8 @@ extension BedrockAgentRuntime { public let agentAliasId: String /// The unique identifier of the agent to use. public let agentId: String + /// Model performance settings for the request. + public let bedrockModelConfigurations: BedrockModelConfigurations? /// Specifies whether to turn on the trace or not to track the agent's reasoning process. For more information, see Trace enablement. public let enableTrace: Bool? /// Specifies whether to end the session with the agent or not. 
@@ -3322,13 +3379,14 @@ extension BedrockAgentRuntime { public let sessionState: SessionState? /// The ARN of the resource making the request. public let sourceArn: String? - /// Specifies the configurations for streaming. + /// Specifies the configurations for streaming. To use agent streaming, you need permissions to perform the bedrock:InvokeModelWithResponseStream action. public let streamingConfigurations: StreamingConfigurations? @inlinable - public init(agentAliasId: String, agentId: String, enableTrace: Bool? = nil, endSession: Bool? = nil, inputText: String? = nil, memoryId: String? = nil, sessionId: String, sessionState: SessionState? = nil, sourceArn: String? = nil, streamingConfigurations: StreamingConfigurations? = nil) { + public init(agentAliasId: String, agentId: String, bedrockModelConfigurations: BedrockModelConfigurations? = nil, enableTrace: Bool? = nil, endSession: Bool? = nil, inputText: String? = nil, memoryId: String? = nil, sessionId: String, sessionState: SessionState? = nil, sourceArn: String? = nil, streamingConfigurations: StreamingConfigurations? 
= nil) { self.agentAliasId = agentAliasId self.agentId = agentId + self.bedrockModelConfigurations = bedrockModelConfigurations self.enableTrace = enableTrace self.endSession = endSession self.inputText = inputText @@ -3344,6 +3402,7 @@ extension BedrockAgentRuntime { var container = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.agentAliasId, key: "agentAliasId") request.encodePath(self.agentId, key: "agentId") + try container.encodeIfPresent(self.bedrockModelConfigurations, forKey: .bedrockModelConfigurations) try container.encodeIfPresent(self.enableTrace, forKey: .enableTrace) try container.encodeIfPresent(self.endSession, forKey: .endSession) try container.encodeIfPresent(self.inputText, forKey: .inputText) @@ -3372,6 +3431,7 @@ extension BedrockAgentRuntime { } private enum CodingKeys: String, CodingKey { + case bedrockModelConfigurations = "bedrockModelConfigurations" case enableTrace = "enableTrace" case endSession = "endSession" case inputText = "inputText" @@ -3421,13 +3481,16 @@ extension BedrockAgentRuntime { public let flowIdentifier: String /// A list of objects, each containing information about an input into the flow. public let inputs: [FlowInput] + /// Model performance settings for the request. + public let modelPerformanceConfiguration: ModelPerformanceConfiguration? @inlinable - public init(enableTrace: Bool? = nil, flowAliasIdentifier: String, flowIdentifier: String, inputs: [FlowInput]) { + public init(enableTrace: Bool? = nil, flowAliasIdentifier: String, flowIdentifier: String, inputs: [FlowInput], modelPerformanceConfiguration: ModelPerformanceConfiguration? 
= nil) { self.enableTrace = enableTrace self.flowAliasIdentifier = flowAliasIdentifier self.flowIdentifier = flowIdentifier self.inputs = inputs + self.modelPerformanceConfiguration = modelPerformanceConfiguration } public func encode(to encoder: Encoder) throws { @@ -3437,6 +3500,7 @@ extension BedrockAgentRuntime { request.encodePath(self.flowAliasIdentifier, key: "flowAliasIdentifier") request.encodePath(self.flowIdentifier, key: "flowIdentifier") try container.encode(self.inputs, forKey: .inputs) + try container.encodeIfPresent(self.modelPerformanceConfiguration, forKey: .modelPerformanceConfiguration) } public func validate(name: String) throws { @@ -3454,6 +3518,7 @@ extension BedrockAgentRuntime { private enum CodingKeys: String, CodingKey { case enableTrace = "enableTrace" case inputs = "inputs" + case modelPerformanceConfiguration = "modelPerformanceConfiguration" } } @@ -3478,6 +3543,8 @@ extension BedrockAgentRuntime { public struct InvokeInlineAgentRequest: AWSEncodableShape { /// A list of action groups with each action group defining the action the inline agent needs to carry out. public let actionGroups: [AgentActionGroup]? + /// Model settings for the request. + public let bedrockModelConfigurations: InlineBedrockModelConfigurations? /// The Amazon Resource Name (ARN) of the Amazon Web Services KMS key to use to encrypt your inline agent. public let customerEncryptionKeyArn: String? /// Specifies whether to turn on the trace or not to track the agent's reasoning process. For more information, see Using trace. @@ -3504,8 +3571,9 @@ extension BedrockAgentRuntime { public let sessionId: String @inlinable - public init(actionGroups: [AgentActionGroup]? = nil, customerEncryptionKeyArn: String? = nil, enableTrace: Bool? = nil, endSession: Bool? = nil, foundationModel: String, guardrailConfiguration: GuardrailConfigurationWithArn? = nil, idleSessionTTLInSeconds: Int? = nil, inlineSessionState: InlineSessionState? = nil, inputText: String? 
= nil, instruction: String, knowledgeBases: [KnowledgeBase]? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, sessionId: String) { + public init(actionGroups: [AgentActionGroup]? = nil, bedrockModelConfigurations: InlineBedrockModelConfigurations? = nil, customerEncryptionKeyArn: String? = nil, enableTrace: Bool? = nil, endSession: Bool? = nil, foundationModel: String, guardrailConfiguration: GuardrailConfigurationWithArn? = nil, idleSessionTTLInSeconds: Int? = nil, inlineSessionState: InlineSessionState? = nil, inputText: String? = nil, instruction: String, knowledgeBases: [KnowledgeBase]? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, sessionId: String) { self.actionGroups = actionGroups + self.bedrockModelConfigurations = bedrockModelConfigurations self.customerEncryptionKeyArn = customerEncryptionKeyArn self.enableTrace = enableTrace self.endSession = endSession @@ -3524,6 +3592,7 @@ extension BedrockAgentRuntime { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) try container.encodeIfPresent(self.actionGroups, forKey: .actionGroups) + try container.encodeIfPresent(self.bedrockModelConfigurations, forKey: .bedrockModelConfigurations) try container.encodeIfPresent(self.customerEncryptionKeyArn, forKey: .customerEncryptionKeyArn) try container.encodeIfPresent(self.enableTrace, forKey: .enableTrace) try container.encodeIfPresent(self.endSession, forKey: .endSession) @@ -3565,6 +3634,7 @@ extension BedrockAgentRuntime { private enum CodingKeys: String, CodingKey { case actionGroups = "actionGroups" + case bedrockModelConfigurations = "bedrockModelConfigurations" case customerEncryptionKeyArn = "customerEncryptionKeyArn" case enableTrace = "enableTrace" case endSession = "endSession" @@ -3977,6 +4047,33 @@ extension BedrockAgentRuntime { } } + public struct ModelNotReadyException: AWSDecodableShape { + public let message: String? 
+ + @inlinable + public init(message: String? = nil) { + self.message = message + } + + private enum CodingKeys: String, CodingKey { + case message = "message" + } + } + + public struct ModelPerformanceConfiguration: AWSEncodableShape { + /// The latency configuration for the model. + public let performanceConfig: PerformanceConfiguration? + + @inlinable + public init(performanceConfig: PerformanceConfiguration? = nil) { + self.performanceConfig = performanceConfig + } + + private enum CodingKeys: String, CodingKey { + case performanceConfig = "performanceConfig" + } + } + public struct Observation: AWSDecodableShape { /// Contains the JSON-formatted string returned by the API invoked by the action group. public let actionGroupInvocationOutput: ActionGroupInvocationOutput? @@ -4074,15 +4171,18 @@ extension BedrockAgentRuntime { public let additionalModelRequestFields: [String: String]? /// Configuration settings for inference when using RetrieveAndGenerate to generate responses while using a knowledge base as a source. public let inferenceConfig: InferenceConfig? + /// The latency configuration for the model. + public let performanceConfig: PerformanceConfiguration? /// Contains the template for the prompt that's sent to the model. Orchestration prompts must include the $conversation_history$ and $output_format_instructions$ variables. For more information, see Use placeholder variables in the user guide. public let promptTemplate: PromptTemplate? /// To split up the prompt and retrieve multiple sources, set the transformation type to QUERY_DECOMPOSITION. public let queryTransformationConfiguration: QueryTransformationConfiguration? @inlinable - public init(additionalModelRequestFields: [String: String]? = nil, inferenceConfig: InferenceConfig? = nil, promptTemplate: PromptTemplate? = nil, queryTransformationConfiguration: QueryTransformationConfiguration? = nil) { + public init(additionalModelRequestFields: [String: String]? 
= nil, inferenceConfig: InferenceConfig? = nil, performanceConfig: PerformanceConfiguration? = nil, promptTemplate: PromptTemplate? = nil, queryTransformationConfiguration: QueryTransformationConfiguration? = nil) { self.additionalModelRequestFields = additionalModelRequestFields self.inferenceConfig = inferenceConfig + self.performanceConfig = performanceConfig self.promptTemplate = promptTemplate self.queryTransformationConfiguration = queryTransformationConfiguration } @@ -4099,6 +4199,7 @@ extension BedrockAgentRuntime { private enum CodingKeys: String, CodingKey { case additionalModelRequestFields = "additionalModelRequestFields" case inferenceConfig = "inferenceConfig" + case performanceConfig = "performanceConfig" case promptTemplate = "promptTemplate" case queryTransformationConfiguration = "queryTransformationConfiguration" } @@ -4215,6 +4316,20 @@ extension BedrockAgentRuntime { } } + public struct PerformanceConfiguration: AWSEncodableShape { + /// To use a latency-optimized version of the model, set to optimized. + public let latency: PerformanceConfigLatency? + + @inlinable + public init(latency: PerformanceConfigLatency? = nil) { + self.latency = latency + } + + private enum CodingKeys: String, CodingKey { + case latency = "latency" + } + } + public struct PostProcessingModelInvocationOutput: AWSDecodableShape { /// Contains information about the foundation model output from the post-processing step. public let metadata: Metadata? 
@@ -5854,6 +5969,7 @@ public struct BedrockAgentRuntimeErrorType: AWSErrorType { case conflictException = "ConflictException" case dependencyFailedException = "DependencyFailedException" case internalServerException = "InternalServerException" + case modelNotReadyException = "ModelNotReadyException" case resourceNotFoundException = "ResourceNotFoundException" case serviceQuotaExceededException = "ServiceQuotaExceededException" case throttlingException = "ThrottlingException" @@ -5888,6 +6004,8 @@ public struct BedrockAgentRuntimeErrorType: AWSErrorType { public static var dependencyFailedException: Self { .init(.dependencyFailedException) } /// An internal server error occurred. Retry your request. public static var internalServerException: Self { .init(.internalServerException) } + /// The model specified in the request is not ready to serve inference requests. The AWS SDK will automatically retry the operation up to 5 times. For information about configuring automatic retries, see Retry behavior in the AWS SDKs and Tools reference guide. + public static var modelNotReadyException: Self { .init(.modelNotReadyException) } /// The specified resource Amazon Resource Name (ARN) was not found. Check the Amazon Resource Name (ARN) and try your request again. public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } /// The number of requests exceeds the service quota. Resubmit your request later. diff --git a/Sources/Soto/Services/BedrockDataAutomation/BedrockDataAutomation_api.swift b/Sources/Soto/Services/BedrockDataAutomation/BedrockDataAutomation_api.swift index 535abd9495..fa42f1dce2 100644 --- a/Sources/Soto/Services/BedrockDataAutomation/BedrockDataAutomation_api.swift +++ b/Sources/Soto/Services/BedrockDataAutomation/BedrockDataAutomation_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS BedrockDataAutomation service. 
/// -/// Amazon Bedrock Keystone Build +/// Amazon Bedrock Data Automation BuildTime public struct BedrockDataAutomation: AWSService { // MARK: Member variables @@ -80,7 +80,7 @@ public struct BedrockDataAutomation: AWSService { // MARK: API Calls - /// Creates an Amazon Bedrock Keystone Blueprint + /// Creates an Amazon Bedrock Data Automation Blueprint @Sendable @inlinable public func createBlueprint(_ input: CreateBlueprintRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateBlueprintResponse { @@ -93,7 +93,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Creates an Amazon Bedrock Keystone Blueprint + /// Creates an Amazon Bedrock Data Automation Blueprint /// /// Parameters: /// - blueprintName: @@ -124,7 +124,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.createBlueprint(input, logger: logger) } - /// Creates a new version of an existing Amazon Bedrock Keystone Blueprint + /// Creates a new version of an existing Amazon Bedrock Data Automation Blueprint @Sendable @inlinable public func createBlueprintVersion(_ input: CreateBlueprintVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateBlueprintVersionResponse { @@ -137,7 +137,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Creates a new version of an existing Amazon Bedrock Keystone Blueprint + /// Creates a new version of an existing Amazon Bedrock Data Automation Blueprint /// /// Parameters: /// - blueprintArn: ARN generated at the server side when a Blueprint is created @@ -156,7 +156,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.createBlueprintVersion(input, logger: logger) } - /// Creates an Amazon Bedrock Keystone DataAutomationProject + /// Creates an Amazon Bedrock Data Automation Project @Sendable @inlinable public func createDataAutomationProject(_ input: CreateDataAutomationProjectRequest, logger: Logger = 
AWSClient.loggingDisabled) async throws -> CreateDataAutomationProjectResponse { @@ -169,7 +169,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Creates an Amazon Bedrock Keystone DataAutomationProject + /// Creates an Amazon Bedrock Data Automation Project /// /// Parameters: /// - clientToken: @@ -206,7 +206,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.createDataAutomationProject(input, logger: logger) } - /// Deletes an existing Amazon Bedrock Keystone Blueprint + /// Deletes an existing Amazon Bedrock Data Automation Blueprint @Sendable @inlinable public func deleteBlueprint(_ input: DeleteBlueprintRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteBlueprintResponse { @@ -219,7 +219,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Deletes an existing Amazon Bedrock Keystone Blueprint + /// Deletes an existing Amazon Bedrock Data Automation Blueprint /// /// Parameters: /// - blueprintArn: ARN generated at the server side when a Blueprint is created @@ -238,7 +238,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.deleteBlueprint(input, logger: logger) } - /// Deletes an existing Amazon Bedrock Keystone DataAutomationProject + /// Deletes an existing Amazon Bedrock Data Automation Project @Sendable @inlinable public func deleteDataAutomationProject(_ input: DeleteDataAutomationProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteDataAutomationProjectResponse { @@ -251,7 +251,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Deletes an existing Amazon Bedrock Keystone DataAutomationProject + /// Deletes an existing Amazon Bedrock Data Automation Project /// /// Parameters: /// - projectArn: ARN generated at the server side when a DataAutomationProject is created @@ -267,7 +267,7 @@ public struct BedrockDataAutomation: AWSService { return try await 
self.deleteDataAutomationProject(input, logger: logger) } - /// Gets an existing Amazon Bedrock Keystone Blueprint + /// Gets an existing Amazon Bedrock Data Automation Blueprint @Sendable @inlinable public func getBlueprint(_ input: GetBlueprintRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetBlueprintResponse { @@ -280,7 +280,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Gets an existing Amazon Bedrock Keystone Blueprint + /// Gets an existing Amazon Bedrock Data Automation Blueprint /// /// Parameters: /// - blueprintArn: ARN generated at the server side when a Blueprint is created @@ -302,7 +302,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.getBlueprint(input, logger: logger) } - /// Gets an existing Amazon Bedrock Keystone DataAutomationProject + /// Gets an existing Amazon Bedrock Data Automation Project @Sendable @inlinable public func getDataAutomationProject(_ input: GetDataAutomationProjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataAutomationProjectResponse { @@ -315,7 +315,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Gets an existing Amazon Bedrock Keystone DataAutomationProject + /// Gets an existing Amazon Bedrock Data Automation Project /// /// Parameters: /// - projectArn: ARN generated at the server side when a DataAutomationProject is created @@ -334,7 +334,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.getDataAutomationProject(input, logger: logger) } - /// Lists all existing Amazon Bedrock Keystone Blueprints + /// Lists all existing Amazon Bedrock Data Automation Blueprints @Sendable @inlinable public func listBlueprints(_ input: ListBlueprintsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListBlueprintsResponse { @@ -347,7 +347,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Lists all existing Amazon Bedrock 
Keystone Blueprints + /// Lists all existing Amazon Bedrock Data Automation Blueprints /// /// Parameters: /// - blueprintArn: @@ -378,7 +378,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.listBlueprints(input, logger: logger) } - /// Lists all existing Amazon Bedrock Keystone DataAutomationProjects + /// Lists all existing Amazon Bedrock Data Automation Projects @Sendable @inlinable public func listDataAutomationProjects(_ input: ListDataAutomationProjectsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDataAutomationProjectsResponse { @@ -391,7 +391,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Lists all existing Amazon Bedrock Keystone DataAutomationProjects + /// Lists all existing Amazon Bedrock Data Automation Projects /// /// Parameters: /// - blueprintFilter: @@ -419,7 +419,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.listDataAutomationProjects(input, logger: logger) } - /// Updates an existing Amazon Bedrock Blueprint + /// Updates an existing Amazon Bedrock Data Automation Blueprint @Sendable @inlinable public func updateBlueprint(_ input: UpdateBlueprintRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateBlueprintResponse { @@ -432,7 +432,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Updates an existing Amazon Bedrock Blueprint + /// Updates an existing Amazon Bedrock Data Automation Blueprint /// /// Parameters: /// - blueprintArn: ARN generated at the server side when a Blueprint is created @@ -454,7 +454,7 @@ public struct BedrockDataAutomation: AWSService { return try await self.updateBlueprint(input, logger: logger) } - /// Updates an existing Amazon Bedrock DataAutomationProject + /// Updates an existing Amazon Bedrock Data Automation Project @Sendable @inlinable public func updateDataAutomationProject(_ input: UpdateDataAutomationProjectRequest, logger: Logger = 
AWSClient.loggingDisabled) async throws -> UpdateDataAutomationProjectResponse { @@ -467,7 +467,7 @@ public struct BedrockDataAutomation: AWSService { logger: logger ) } - /// Updates an existing Amazon Bedrock DataAutomationProject + /// Updates an existing Amazon Bedrock Data Automation Project /// /// Parameters: /// - customOutputConfiguration: diff --git a/Sources/Soto/Services/BedrockDataAutomationRuntime/BedrockDataAutomationRuntime_api.swift b/Sources/Soto/Services/BedrockDataAutomationRuntime/BedrockDataAutomationRuntime_api.swift index e1be16cbd4..e9c64b3d3f 100644 --- a/Sources/Soto/Services/BedrockDataAutomationRuntime/BedrockDataAutomationRuntime_api.swift +++ b/Sources/Soto/Services/BedrockDataAutomationRuntime/BedrockDataAutomationRuntime_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS BedrockDataAutomationRuntime service. /// -/// Amazon Bedrock Keystone Runtime +/// Amazon Bedrock Data Automation Runtime public struct BedrockDataAutomationRuntime: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/Billing/Billing_api.swift b/Sources/Soto/Services/Billing/Billing_api.swift index e4cacaf9fc..25000817d9 100644 --- a/Sources/Soto/Services/Billing/Billing_api.swift +++ b/Sources/Soto/Services/Billing/Billing_api.swift @@ -80,6 +80,137 @@ public struct Billing: AWSService { // MARK: API Calls + /// Creates a billing view with the specified billing view attributes. + @Sendable + @inlinable + public func createBillingView(_ input: CreateBillingViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateBillingViewResponse { + try await self.client.execute( + operation: "CreateBillingView", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a billing view with the specified billing view attributes. 
+ /// + /// Parameters: + /// - clientToken: A unique, case-sensitive identifier you specify to ensure idempotency of the request. Idempotency ensures that an API request completes no more than one time. If the original request completes successfully, any subsequent retries complete successfully without performing any further actions with an idempotent request. + /// - dataFilterExpression: See Expression. Billing view only supports LINKED_ACCOUNT and Tags. + /// - description: The description of the billing view. + /// - name: The name of the billing view. + /// - resourceTags: A list of key value map specifying tags associated to the billing view being created. + /// - sourceViews: A list of billing views used as the data source for the custom billing view. + /// - logger: Logger use during operation + @inlinable + public func createBillingView( + clientToken: String? = CreateBillingViewRequest.idempotencyToken(), + dataFilterExpression: Expression? = nil, + description: String? = nil, + name: String, + resourceTags: [ResourceTag]? = nil, + sourceViews: [String], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateBillingViewResponse { + let input = CreateBillingViewRequest( + clientToken: clientToken, + dataFilterExpression: dataFilterExpression, + description: description, + name: name, + resourceTags: resourceTags, + sourceViews: sourceViews + ) + return try await self.createBillingView(input, logger: logger) + } + + /// Deletes the specified billing view. + @Sendable + @inlinable + public func deleteBillingView(_ input: DeleteBillingViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteBillingViewResponse { + try await self.client.execute( + operation: "DeleteBillingView", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes the specified billing view. 
+ /// + /// Parameters: + /// - arn: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - logger: Logger use during operation + @inlinable + public func deleteBillingView( + arn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteBillingViewResponse { + let input = DeleteBillingViewRequest( + arn: arn + ) + return try await self.deleteBillingView(input, logger: logger) + } + + /// Returns the metadata associated to the specified billing view ARN. + @Sendable + @inlinable + public func getBillingView(_ input: GetBillingViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetBillingViewResponse { + try await self.client.execute( + operation: "GetBillingView", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns the metadata associated to the specified billing view ARN. + /// + /// Parameters: + /// - arn: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - logger: Logger use during operation + @inlinable + public func getBillingView( + arn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetBillingViewResponse { + let input = GetBillingViewRequest( + arn: arn + ) + return try await self.getBillingView(input, logger: logger) + } + + /// Returns the resource-based policy document attached to the resource in JSON format. + @Sendable + @inlinable + public func getResourcePolicy(_ input: GetResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetResourcePolicyResponse { + try await self.client.execute( + operation: "GetResourcePolicy", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns the resource-based policy document attached to the resource in JSON format. 
+ /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the billing view resource to which the policy is attached to. + /// - logger: Logger use during operation + @inlinable + public func getResourcePolicy( + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetResourcePolicyResponse { + let input = GetResourcePolicyRequest( + resourceArn: resourceArn + ) + return try await self.getResourcePolicy(input, logger: logger) + } + /// Lists the billing views available for a given time period. Every Amazon Web Services account has a unique PRIMARY billing view that represents the billing data available by default. Accounts that use Billing Conductor also have BILLING_GROUP billing views representing pro forma costs associated with each created billing group. @Sendable @inlinable @@ -97,23 +228,198 @@ public struct Billing: AWSService { /// /// Parameters: /// - activeTimeRange: The time range for the billing views listed. PRIMARY billing view is always listed. BILLING_GROUP billing views are listed for time ranges when the associated billing group resource in Billing Conductor is active. The time range must be within one calendar month. + /// - arns: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - billingViewTypes: The type of billing view. /// - maxResults: The maximum number of billing views to retrieve. Default is 100. /// - nextToken: The pagination token that is used on subsequent calls to list billing views. + /// - ownerAccountId: The list of owners of the billing view. /// - logger: Logger use during operation @inlinable public func listBillingViews( - activeTimeRange: ActiveTimeRange, + activeTimeRange: ActiveTimeRange? = nil, + arns: [String]? = nil, + billingViewTypes: [BillingViewType]? = nil, maxResults: Int? = nil, nextToken: String? = nil, + ownerAccountId: String? 
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> ListBillingViewsResponse { let input = ListBillingViewsRequest( activeTimeRange: activeTimeRange, + arns: arns, + billingViewTypes: billingViewTypes, maxResults: maxResults, - nextToken: nextToken + nextToken: nextToken, + ownerAccountId: ownerAccountId ) return try await self.listBillingViews(input, logger: logger) } + + /// Lists the source views (managed Amazon Web Services billing views) associated with the billing view. + @Sendable + @inlinable + public func listSourceViewsForBillingView(_ input: ListSourceViewsForBillingViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSourceViewsForBillingViewResponse { + try await self.client.execute( + operation: "ListSourceViewsForBillingView", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists the source views (managed Amazon Web Services billing views) associated with the billing view. + /// + /// Parameters: + /// - arn: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - maxResults: The number of entries a paginated response contains. + /// - nextToken: The pagination token that is used on subsequent calls to list billing views. + /// - logger: Logger use during operation + @inlinable + public func listSourceViewsForBillingView( + arn: String, + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListSourceViewsForBillingViewResponse { + let input = ListSourceViewsForBillingViewRequest( + arn: arn, + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listSourceViewsForBillingView(input, logger: logger) + } + + /// Lists tags associated with the billing view resource. 
+ @Sendable + @inlinable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + try await self.client.execute( + operation: "ListTagsForResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists tags associated with the billing view resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource. + /// - logger: Logger use during operation + @inlinable + public func listTagsForResource( + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListTagsForResourceResponse { + let input = ListTagsForResourceRequest( + resourceArn: resourceArn + ) + return try await self.listTagsForResource(input, logger: logger) + } + + /// An API operation for adding one or more tags (key-value pairs) to a resource. + @Sendable + @inlinable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { + try await self.client.execute( + operation: "TagResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// An API operation for adding one or more tags (key-value pairs) to a resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource. + /// - resourceTags: A list of tag key value pairs that are associated with the resource. + /// - logger: Logger use during operation + @inlinable + public func tagResource( + resourceArn: String, + resourceTags: [ResourceTag], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> TagResourceResponse { + let input = TagResourceRequest( + resourceArn: resourceArn, + resourceTags: resourceTags + ) + return try await self.tagResource(input, logger: logger) + } + + /// Removes one or more tags from a resource. 
Specify only tag keys in your request. Don't specify the value. + @Sendable + @inlinable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { + try await self.client.execute( + operation: "UntagResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Removes one or more tags from a resource. Specify only tag keys in your request. Don't specify the value. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource. + /// - resourceTagKeys: A list of tag key value pairs that are associated with the resource. + /// - logger: Logger use during operation + @inlinable + public func untagResource( + resourceArn: String, + resourceTagKeys: [String], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UntagResourceResponse { + let input = UntagResourceRequest( + resourceArn: resourceArn, + resourceTagKeys: resourceTagKeys + ) + return try await self.untagResource(input, logger: logger) + } + + /// An API to update the attributes of the billing view. + @Sendable + @inlinable + public func updateBillingView(_ input: UpdateBillingViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateBillingViewResponse { + try await self.client.execute( + operation: "UpdateBillingView", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// An API to update the attributes of the billing view. + /// + /// Parameters: + /// - arn: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - dataFilterExpression: See Expression. Billing view only supports LINKED_ACCOUNT and Tags. + /// - description: The description of the billing view. + /// - name: The name of the billing view. 
+ /// - logger: Logger use during operation + @inlinable + public func updateBillingView( + arn: String, + dataFilterExpression: Expression? = nil, + description: String? = nil, + name: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateBillingViewResponse { + let input = UpdateBillingViewRequest( + arn: arn, + dataFilterExpression: dataFilterExpression, + description: description, + name: name + ) + return try await self.updateBillingView(input, logger: logger) + } } extension Billing { @@ -151,20 +457,66 @@ extension Billing { /// /// - Parameters: /// - activeTimeRange: The time range for the billing views listed. PRIMARY billing view is always listed. BILLING_GROUP billing views are listed for time ranges when the associated billing group resource in Billing Conductor is active. The time range must be within one calendar month. + /// - arns: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - billingViewTypes: The type of billing view. /// - maxResults: The maximum number of billing views to retrieve. Default is 100. + /// - ownerAccountId: The list of owners of the billing view. /// - logger: Logger used for logging @inlinable public func listBillingViewsPaginator( - activeTimeRange: ActiveTimeRange, + activeTimeRange: ActiveTimeRange? = nil, + arns: [String]? = nil, + billingViewTypes: [BillingViewType]? = nil, maxResults: Int? = nil, + ownerAccountId: String? = nil, logger: Logger = AWSClient.loggingDisabled ) -> AWSClient.PaginatorSequence { let input = ListBillingViewsRequest( activeTimeRange: activeTimeRange, - maxResults: maxResults + arns: arns, + billingViewTypes: billingViewTypes, + maxResults: maxResults, + ownerAccountId: ownerAccountId ) return self.listBillingViewsPaginator(input, logger: logger) } + + /// Return PaginatorSequence for operation ``listSourceViewsForBillingView(_:logger:)``. 
+ /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listSourceViewsForBillingViewPaginator( + _ input: ListSourceViewsForBillingViewRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listSourceViewsForBillingView, + inputKey: \ListSourceViewsForBillingViewRequest.nextToken, + outputKey: \ListSourceViewsForBillingViewResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listSourceViewsForBillingView(_:logger:)``. + /// + /// - Parameters: + /// - arn: The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + /// - maxResults: The number of entries a paginated response contains. + /// - logger: Logger used for logging + @inlinable + public func listSourceViewsForBillingViewPaginator( + arn: String, + maxResults: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListSourceViewsForBillingViewRequest( + arn: arn, + maxResults: maxResults + ) + return self.listSourceViewsForBillingViewPaginator(input, logger: logger) + } } extension Billing.ListBillingViewsRequest: AWSPaginateToken { @@ -172,6 +524,20 @@ extension Billing.ListBillingViewsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Billing.ListBillingViewsRequest { return .init( activeTimeRange: self.activeTimeRange, + arns: self.arns, + billingViewTypes: self.billingViewTypes, + maxResults: self.maxResults, + nextToken: token, + ownerAccountId: self.ownerAccountId + ) + } +} + +extension Billing.ListSourceViewsForBillingViewRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Billing.ListSourceViewsForBillingViewRequest { + return .init( + arn: self.arn, maxResults: self.maxResults, nextToken: token ) diff --git 
a/Sources/Soto/Services/Billing/Billing_shapes.swift b/Sources/Soto/Services/Billing/Billing_shapes.swift index eaaef4b292..017adfb0fb 100644 --- a/Sources/Soto/Services/Billing/Billing_shapes.swift +++ b/Sources/Soto/Services/Billing/Billing_shapes.swift @@ -28,10 +28,16 @@ extension Billing { public enum BillingViewType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case billingGroup = "BILLING_GROUP" + case custom = "CUSTOM" case primary = "PRIMARY" public var description: String { return self.rawValue } } + public enum Dimension: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case linkedAccount = "LINKED_ACCOUNT" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct ActiveTimeRange: AWSEncodableShape { @@ -52,20 +58,65 @@ extension Billing { } } + public struct BillingViewElement: AWSDecodableShape { + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + public let arn: String? + /// The type of billing group. + public let billingViewType: BillingViewType? + /// The time when the billing view was created. + public let createdAt: Date? + /// See Expression. Billing view only supports LINKED_ACCOUNT and Tags. + public let dataFilterExpression: Expression? + /// The description of the billing view. + public let description: String? + /// A list of names of the billing view. + public let name: String? + /// The list of owners of the billing view. + public let ownerAccountId: String? + /// The time when the billing view was last updated. + public let updatedAt: Date? + + @inlinable + public init(arn: String? = nil, billingViewType: BillingViewType? = nil, createdAt: Date? = nil, dataFilterExpression: Expression? = nil, description: String? = nil, name: String? = nil, ownerAccountId: String? = nil, updatedAt: Date? 
= nil) { + self.arn = arn + self.billingViewType = billingViewType + self.createdAt = createdAt + self.dataFilterExpression = dataFilterExpression + self.description = description + self.name = name + self.ownerAccountId = ownerAccountId + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case billingViewType = "billingViewType" + case createdAt = "createdAt" + case dataFilterExpression = "dataFilterExpression" + case description = "description" + case name = "name" + case ownerAccountId = "ownerAccountId" + case updatedAt = "updatedAt" + } + } + public struct BillingViewListElement: AWSDecodableShape { /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. public let arn: String? /// The type of billing view. public let billingViewType: BillingViewType? + /// The description of the billing view. + public let description: String? /// A list of names of the Billing view. public let name: String? /// The list of owners of the Billing view. public let ownerAccountId: String? @inlinable - public init(arn: String? = nil, billingViewType: BillingViewType? = nil, name: String? = nil, ownerAccountId: String? = nil) { + public init(arn: String? = nil, billingViewType: BillingViewType? = nil, description: String? = nil, name: String? = nil, ownerAccountId: String? = nil) { self.arn = arn self.billingViewType = billingViewType + self.description = description self.name = name self.ownerAccountId = ownerAccountId } @@ -73,37 +124,286 @@ extension Billing { private enum CodingKeys: String, CodingKey { case arn = "arn" case billingViewType = "billingViewType" + case description = "description" case name = "name" case ownerAccountId = "ownerAccountId" } } + public struct CreateBillingViewRequest: AWSEncodableShape { + /// A unique, case-sensitive identifier you specify to ensure idempotency of the request. Idempotency ensures that an API request completes no more than one time. 
If the original request completes successfully, any subsequent retries complete successfully without performing any further actions with an idempotent request. + public let clientToken: String? + /// See Expression. Billing view only supports LINKED_ACCOUNT and Tags. + public let dataFilterExpression: Expression? + /// The description of the billing view. + public let description: String? + /// The name of the billing view. + public let name: String + /// A list of key value map specifying tags associated to the billing view being created. + public let resourceTags: [ResourceTag]? + /// A list of billing views used as the data source for the custom billing view. + public let sourceViews: [String] + + @inlinable + public init(clientToken: String? = CreateBillingViewRequest.idempotencyToken(), dataFilterExpression: Expression? = nil, description: String? = nil, name: String, resourceTags: [ResourceTag]? = nil, sourceViews: [String]) { + self.clientToken = clientToken + self.dataFilterExpression = dataFilterExpression + self.description = description + self.name = name + self.resourceTags = resourceTags + self.sourceViews = sourceViews + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.clientToken, key: "X-Amzn-Client-Token") + try container.encodeIfPresent(self.dataFilterExpression, forKey: .dataFilterExpression) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encode(self.name, forKey: .name) + try container.encodeIfPresent(self.resourceTags, forKey: .resourceTags) + try container.encode(self.sourceViews, forKey: .sourceViews) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9-]+$") + try self.dataFilterExpression?.validate(name: "\(name).dataFilterExpression") + try self.validate(self.description, name: "description", parent: name, max: 1024) + try self.validate(self.description, name: "description", parent: name, pattern: "^([ a-zA-Z0-9_\\+=\\.\\-@]+)?$") + try self.validate(self.name, name: "name", parent: name, max: 128) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[ a-zA-Z0-9_\\+=\\.\\-@]+$") + try self.resourceTags?.forEach { + try $0.validate(name: "\(name).resourceTags[]") + } + try self.validate(self.resourceTags, name: "resourceTags", parent: name, max: 200) + try self.sourceViews.forEach { + try validate($0, name: "sourceViews[]", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$") + } + try self.validate(self.sourceViews, name: "sourceViews", parent: name, max: 1) + try self.validate(self.sourceViews, name: "sourceViews", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case dataFilterExpression = "dataFilterExpression" + case description = "description" + case name = "name" + case resourceTags = "resourceTags" + case sourceViews = "sourceViews" + } + } + + public struct CreateBillingViewResponse: 
AWSDecodableShape { + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + public let arn: String + /// The time when the billing view was created. + public let createdAt: Date? + + @inlinable + public init(arn: String, createdAt: Date? = nil) { + self.arn = arn + self.createdAt = createdAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdAt = "createdAt" + } + } + + public struct DeleteBillingViewRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + public let arn: String + + @inlinable + public init(arn: String) { + self.arn = arn + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$") + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + } + } + + public struct DeleteBillingViewResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + public let arn: String + + @inlinable + public init(arn: String) { + self.arn = arn + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + } + } + + public struct DimensionValues: AWSEncodableShape & AWSDecodableShape { + /// The names of the metadata types that you can use to filter and group your results. + public let key: Dimension + /// The metadata values that you can use to filter and group your results. 
+ public let values: [String] + + @inlinable + public init(key: Dimension, values: [String]) { + self.key = key + self.values = values + } + + public func validate(name: String) throws { + try self.values.forEach { + try validate($0, name: "values[]", parent: name, max: 1024) + try validate($0, name: "values[]", parent: name, pattern: "^[\\S\\s]*$") + } + try self.validate(self.values, name: "values", parent: name, max: 200) + try self.validate(self.values, name: "values", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case key = "key" + case values = "values" + } + } + + public struct Expression: AWSEncodableShape & AWSDecodableShape { + /// The specific Dimension to use for Expression. + public let dimensions: DimensionValues? + /// The specific Tag to use for Expression. + public let tags: TagValues? + + @inlinable + public init(dimensions: DimensionValues? = nil, tags: TagValues? = nil) { + self.dimensions = dimensions + self.tags = tags + } + + public func validate(name: String) throws { + try self.dimensions?.validate(name: "\(name).dimensions") + try self.tags?.validate(name: "\(name).tags") + } + + private enum CodingKeys: String, CodingKey { + case dimensions = "dimensions" + case tags = "tags" + } + } + + public struct GetBillingViewRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + public let arn: String + + @inlinable + public init(arn: String) { + self.arn = arn + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$") + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + } + } + + public struct GetBillingViewResponse: AWSDecodableShape { + /// The billing view element associated with the specified ARN. 
+ public let billingView: BillingViewElement + + @inlinable + public init(billingView: BillingViewElement) { + self.billingView = billingView + } + + private enum CodingKeys: String, CodingKey { + case billingView = "billingView" + } + } + + public struct GetResourcePolicyRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the billing view resource to which the policy is attached to. + public let resourceArn: String + + @inlinable + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:[a-zA-Z0-9/:_\\+=\\.\\@-]{0,70}[a-zA-Z0-9]$") + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "resourceArn" + } + } + + public struct GetResourcePolicyResponse: AWSDecodableShape { + /// The resource-based policy document attached to the resource in JSON format. + public let policy: String? + /// The Amazon Resource Name (ARN) of the billing view resource to which the policy is attached to. + public let resourceArn: String + + @inlinable + public init(policy: String? = nil, resourceArn: String) { + self.policy = policy + self.resourceArn = resourceArn + } + + private enum CodingKeys: String, CodingKey { + case policy = "policy" + case resourceArn = "resourceArn" + } + } + public struct ListBillingViewsRequest: AWSEncodableShape { /// The time range for the billing views listed. PRIMARY billing view is always listed. BILLING_GROUP billing views are listed for time ranges when the associated billing group resource in Billing Conductor is active. The time range must be within one calendar month. - public let activeTimeRange: ActiveTimeRange + public let activeTimeRange: ActiveTimeRange? + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + public let arns: [String]? + /// The type of billing view. 
+ public let billingViewTypes: [BillingViewType]? /// The maximum number of billing views to retrieve. Default is 100. public let maxResults: Int? /// The pagination token that is used on subsequent calls to list billing views. public let nextToken: String? + /// The list of owners of the billing view. + public let ownerAccountId: String? @inlinable - public init(activeTimeRange: ActiveTimeRange, maxResults: Int? = nil, nextToken: String? = nil) { + public init(activeTimeRange: ActiveTimeRange? = nil, arns: [String]? = nil, billingViewTypes: [BillingViewType]? = nil, maxResults: Int? = nil, nextToken: String? = nil, ownerAccountId: String? = nil) { self.activeTimeRange = activeTimeRange + self.arns = arns + self.billingViewTypes = billingViewTypes self.maxResults = maxResults self.nextToken = nextToken + self.ownerAccountId = ownerAccountId } public func validate(name: String) throws { + try self.arns?.forEach { + try validate($0, name: "arns[]", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$") + } + try self.validate(self.arns, name: "arns", parent: name, max: 10) try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2047) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.ownerAccountId, name: "ownerAccountId", parent: name, pattern: "^[0-9]{12}$") } private enum CodingKeys: String, CodingKey { case activeTimeRange = "activeTimeRange" + case arns = "arns" + case billingViewTypes = "billingViewTypes" case maxResults = "maxResults" case nextToken = "nextToken" + case ownerAccountId = "ownerAccountId" } } @@ -124,6 +424,254 @@ extension Billing { case nextToken = "nextToken" } } + + public struct ListSourceViewsForBillingViewRequest: AWSEncodableShape { + /// The Amazon Resource 
Name (ARN) that can be used to uniquely identify the billing view. + public let arn: String + /// The number of entries a paginated response contains. + public let maxResults: Int? + /// The pagination token that is used on subsequent calls to list billing views. + public let nextToken: String? + + @inlinable + public init(arn: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.arn = arn + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2047) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case maxResults = "maxResults" + case nextToken = "nextToken" + } + } + + public struct ListSourceViewsForBillingViewResponse: AWSDecodableShape { + /// The pagination token that is used on subsequent calls to list billing views. + public let nextToken: String? + /// A list of billing views used as the data source for the custom billing view. + public let sourceViews: [String] + + @inlinable + public init(nextToken: String? = nil, sourceViews: [String]) { + self.nextToken = nextToken + self.sourceViews = sourceViews + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case sourceViews = "sourceViews" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. 
+ public let resourceArn: String + + @inlinable + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:[a-zA-Z0-9/:_\\+=\\.\\@-]{0,70}[a-zA-Z0-9]$") + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "resourceArn" + } + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// A list of tag key value pairs that are associated with the resource. + public let resourceTags: [ResourceTag]? + + @inlinable + public init(resourceTags: [ResourceTag]? = nil) { + self.resourceTags = resourceTags + } + + private enum CodingKeys: String, CodingKey { + case resourceTags = "resourceTags" + } + } + + public struct ResourceTag: AWSEncodableShape & AWSDecodableShape { + /// The key that's associated with the tag. + public let key: String + /// The value that's associated with the tag. + public let value: String? + + @inlinable + public init(key: String, value: String? = nil) { + self.key = key + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.key, name: "key", parent: name, max: 128) + try self.validate(self.key, name: "key", parent: name, min: 1) + try self.validate(self.value, name: "value", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case key = "key" + case value = "value" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + /// A list of tag key value pairs that are associated with the resource. 
+ public let resourceTags: [ResourceTag] + + @inlinable + public init(resourceArn: String, resourceTags: [ResourceTag]) { + self.resourceArn = resourceArn + self.resourceTags = resourceTags + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:[a-zA-Z0-9/:_\\+=\\.\\@-]{0,70}[a-zA-Z0-9]$") + try self.resourceTags.forEach { + try $0.validate(name: "\(name).resourceTags[]") + } + try self.validate(self.resourceTags, name: "resourceTags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "resourceArn" + case resourceTags = "resourceTags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct TagValues: AWSEncodableShape & AWSDecodableShape { + /// The key for the tag. + public let key: String + /// The specific value of the tag. + public let values: [String] + + @inlinable + public init(key: String, values: [String]) { + self.key = key + self.values = values + } + + public func validate(name: String) throws { + try self.validate(self.key, name: "key", parent: name, max: 1024) + try self.validate(self.key, name: "key", parent: name, pattern: "^[\\S\\s]*$") + try self.values.forEach { + try validate($0, name: "values[]", parent: name, max: 1024) + try validate($0, name: "values[]", parent: name, pattern: "^[\\S\\s]*$") + } + try self.validate(self.values, name: "values", parent: name, max: 200) + try self.validate(self.values, name: "values", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case key = "key" + case values = "values" + } + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + /// A list of tag key value pairs that are associated with the resource. 
+ public let resourceTagKeys: [String] + + @inlinable + public init(resourceArn: String, resourceTagKeys: [String]) { + self.resourceArn = resourceArn + self.resourceTagKeys = resourceTagKeys + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:[a-zA-Z0-9/:_\\+=\\.\\@-]{0,70}[a-zA-Z0-9]$") + try self.resourceTagKeys.forEach { + try validate($0, name: "resourceTagKeys[]", parent: name, max: 128) + try validate($0, name: "resourceTagKeys[]", parent: name, min: 1) + } + try self.validate(self.resourceTagKeys, name: "resourceTagKeys", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "resourceArn" + case resourceTagKeys = "resourceTagKeys" + } + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateBillingViewRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + public let arn: String + /// See Expression. Billing view only supports LINKED_ACCOUNT and Tags. + public let dataFilterExpression: Expression? + /// The description of the billing view. + public let description: String? + /// The name of the billing view. + public let name: String? + + @inlinable + public init(arn: String, dataFilterExpression: Expression? = nil, description: String? = nil, name: String? 
= nil) { + self.arn = arn + self.dataFilterExpression = dataFilterExpression + self.description = description + self.name = name + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$") + try self.dataFilterExpression?.validate(name: "\(name).dataFilterExpression") + try self.validate(self.description, name: "description", parent: name, max: 1024) + try self.validate(self.description, name: "description", parent: name, pattern: "^([ a-zA-Z0-9_\\+=\\.\\-@]+)?$") + try self.validate(self.name, name: "name", parent: name, max: 128) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[ a-zA-Z0-9_\\+=\\.\\-@]+$") + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case dataFilterExpression = "dataFilterExpression" + case description = "description" + case name = "name" + } + } + + public struct UpdateBillingViewResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view. + public let arn: String + /// The time when the billing view was last updated. + public let updatedAt: Date? + + @inlinable + public init(arn: String, updatedAt: Date? 
= nil) { + self.arn = arn + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case updatedAt = "updatedAt" + } + } } // MARK: - Errors @@ -132,7 +680,10 @@ extension Billing { public struct BillingErrorType: AWSErrorType { enum Code: String { case accessDeniedException = "AccessDeniedException" + case conflictException = "ConflictException" case internalServerException = "InternalServerException" + case resourceNotFoundException = "ResourceNotFoundException" + case serviceQuotaExceededException = "ServiceQuotaExceededException" case throttlingException = "ThrottlingException" case validationException = "ValidationException" } @@ -157,8 +708,14 @@ public struct BillingErrorType: AWSErrorType { /// You don't have sufficient access to perform this action. public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the conflict before retrying this request. + public static var conflictException: Self { .init(.conflictException) } /// The request processing failed because of an unknown error, exception, or failure. public static var internalServerException: Self { .init(.internalServerException) } + /// The specified ARN in the request doesn't exist. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// You've reached the limit of resources you can create, or exceeded the size of an individual resource. + public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } /// The request was denied due to request throttling. public static var throttlingException: Self { .init(.throttlingException) } /// The input fails to satisfy the constraints specified by an Amazon Web Services service. 
diff --git a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift index adfee537cd..2fd84485f1 100644 --- a/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift +++ b/Sources/Soto/Services/CognitoIdentityProvider/CognitoIdentityProvider_api.swift @@ -81,6 +81,45 @@ public struct CognitoIdentityProvider: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "cognito-idp.af-south-1.amazonaws.com", + "ap-east-1": "cognito-idp.ap-east-1.amazonaws.com", + "ap-northeast-1": "cognito-idp.ap-northeast-1.amazonaws.com", + "ap-northeast-2": "cognito-idp.ap-northeast-2.amazonaws.com", + "ap-northeast-3": "cognito-idp.ap-northeast-3.amazonaws.com", + "ap-south-1": "cognito-idp.ap-south-1.amazonaws.com", + "ap-south-2": "cognito-idp.ap-south-2.amazonaws.com", + "ap-southeast-1": "cognito-idp.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "cognito-idp.ap-southeast-2.amazonaws.com", + "ap-southeast-3": "cognito-idp.ap-southeast-3.amazonaws.com", + "ap-southeast-4": "cognito-idp.ap-southeast-4.amazonaws.com", + "ca-central-1": "cognito-idp.ca-central-1.amazonaws.com", + "ca-west-1": "cognito-idp.ca-west-1.amazonaws.com", + "eu-central-1": "cognito-idp.eu-central-1.amazonaws.com", + "eu-central-2": "cognito-idp.eu-central-2.amazonaws.com", + "eu-north-1": "cognito-idp.eu-north-1.amazonaws.com", + "eu-south-1": "cognito-idp.eu-south-1.amazonaws.com", + "eu-south-2": "cognito-idp.eu-south-2.amazonaws.com", + "eu-west-1": "cognito-idp.eu-west-1.amazonaws.com", + "eu-west-2": "cognito-idp.eu-west-2.amazonaws.com", + "eu-west-3": "cognito-idp.eu-west-3.amazonaws.com", + "il-central-1": "cognito-idp.il-central-1.amazonaws.com", + "me-central-1": "cognito-idp.me-central-1.amazonaws.com", + "me-south-1": 
"cognito-idp.me-south-1.amazonaws.com", + "sa-east-1": "cognito-idp.sa-east-1.amazonaws.com", + "us-east-1": "cognito-idp.us-east-1.amazonaws.com", + "us-east-2": "cognito-idp.us-east-2.amazonaws.com", + "us-gov-west-1": "cognito-idp.us-gov-west-1.amazonaws.com", + "us-west-1": "cognito-idp.us-west-1.amazonaws.com", + "us-west-2": "cognito-idp.us-west-2.amazonaws.com" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "us-east-1": "cognito-idp-fips.us-east-1.amazonaws.com", + "us-east-2": "cognito-idp-fips.us-east-2.amazonaws.com", + "us-gov-west-1": "cognito-idp-fips.us-gov-west-1.amazonaws.com", + "us-west-1": "cognito-idp-fips.us-west-1.amazonaws.com", + "us-west-2": "cognito-idp-fips.us-west-2.amazonaws.com" + ]), [.fips]: .init(endpoints: [ "us-east-1": "cognito-idp-fips.us-east-1.amazonaws.com", "us-east-2": "cognito-idp-fips.us-east-2.amazonaws.com", diff --git a/Sources/Soto/Services/Connect/Connect_api.swift b/Sources/Soto/Services/Connect/Connect_api.swift index c38fd127a8..6019fa648e 100644 --- a/Sources/Soto/Services/Connect/Connect_api.swift +++ b/Sources/Soto/Services/Connect/Connect_api.swift @@ -5094,7 +5094,7 @@ public struct Connect: AWSService { return try await self.listAuthenticationProfiles(input, logger: logger) } - /// This API is in preview release for Amazon Connect and is subject to change. For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to returns both Amazon Lex V1 and V2 bots. + /// This API is in preview release for Amazon Connect and is subject to change. For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to return both Amazon Lex V1 and V2 bots. 
@Sendable @inlinable public func listBots(_ input: ListBotsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListBotsResponse { @@ -5107,7 +5107,7 @@ public struct Connect: AWSService { logger: logger ) } - /// This API is in preview release for Amazon Connect and is subject to change. For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to returns both Amazon Lex V1 and V2 bots. + /// This API is in preview release for Amazon Connect and is subject to change. For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to return both Amazon Lex V1 and V2 bots. /// /// Parameters: /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. @@ -7860,6 +7860,7 @@ public struct Connect: AWSService { /// - chatDurationInMinutes: The total duration of the newly started chat session. If not specified, the chat session duration defaults to 25 hour. The minimum configurable time is 60 minutes. The maximum configurable time is 10,080 minutes (7 days). /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. /// - contactFlowId: The identifier of the flow for initiating the chat. To see the ContactFlowId in the Amazon Connect admin website, on the navigation menu go to Routing, Flows. Choose the flow. On the flow page, under the name of the flow, choose Show additional flow information. 
The ContactFlowId is the last part of the ARN, shown here in bold: arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx + /// - customerId: The customer's identification number. For example, the CustomerId may be a customer number from your CRM. /// - initialMessage: The initial message to be sent to the newly created chat. If you have a Lex bot in your flow, the initial message is not delivered to the Lex bot. /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. /// - participantDetails: Information identifying the participant. @@ -7874,6 +7875,7 @@ public struct Connect: AWSService { chatDurationInMinutes: Int? = nil, clientToken: String? = StartChatContactRequest.idempotencyToken(), contactFlowId: String, + customerId: String? = nil, initialMessage: ChatMessage? = nil, instanceId: String, participantDetails: ParticipantDetails, @@ -7888,6 +7890,7 @@ public struct Connect: AWSService { chatDurationInMinutes: chatDurationInMinutes, clientToken: clientToken, contactFlowId: contactFlowId, + customerId: customerId, initialMessage: initialMessage, instanceId: instanceId, participantDetails: participantDetails, @@ -9525,6 +9528,47 @@ public struct Connect: AWSService { return try await self.updateInstanceStorageConfig(input, logger: logger) } + /// Instructs Amazon Connect to resume the authentication process. The subsequent actions depend on the request body contents: If a code is provided: Connect retrieves the identity information from Amazon Cognito and imports it into Connect Customer Profiles. If an error is provided: The error branch of the Authenticate Customer block is executed. The API returns a success response to acknowledge the request. However, the interaction and exchange of identity information occur asynchronously after the response is returned. 
+ @Sendable + @inlinable + public func updateParticipantAuthentication(_ input: UpdateParticipantAuthenticationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateParticipantAuthenticationResponse { + try await self.client.execute( + operation: "UpdateParticipantAuthentication", + path: "/contact/update-participant-authentication", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Instructs Amazon Connect to resume the authentication process. The subsequent actions depend on the request body contents: If a code is provided: Connect retrieves the identity information from Amazon Cognito and imports it into Connect Customer Profiles. If an error is provided: The error branch of the Authenticate Customer block is executed. The API returns a success response to acknowledge the request. However, the interaction and exchange of identity information occur asynchronously after the response is returned. + /// + /// Parameters: + /// - code: The code query parameter provided by Cognito in the redirectUri. + /// - error: The error query parameter provided by Cognito in the redirectUri. + /// - errorDescription: The error_description parameter provided by Cognito in the redirectUri. + /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + /// - state: The state query parameter that was provided by Cognito in the redirectUri. This will also match the state parameter provided in the AuthenticationUrl from the GetAuthenticationUrl response. + /// - logger: Logger use during operation + @inlinable + public func updateParticipantAuthentication( + code: String? = nil, + error: String? = nil, + errorDescription: String? 
= nil, + instanceId: String, + state: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateParticipantAuthenticationResponse { + let input = UpdateParticipantAuthenticationRequest( + code: code, + error: error, + errorDescription: errorDescription, + instanceId: instanceId, + state: state + ) + return try await self.updateParticipantAuthentication(input, logger: logger) + } + /// Updates timeouts for when human chat participants are to be considered idle, and when agents are automatically disconnected from a chat due to idleness. You can set four timers: Customer idle timeout Customer auto-disconnect timeout Agent idle timeout Agent auto-disconnect timeout For more information about how chat timeouts work, see Set up chat timeouts for human participants. @Sendable @inlinable diff --git a/Sources/Soto/Services/Connect/Connect_shapes.swift b/Sources/Soto/Services/Connect/Connect_shapes.swift index 7067d5be71..a3b87e1241 100644 --- a/Sources/Soto/Services/Connect/Connect_shapes.swift +++ b/Sources/Soto/Services/Connect/Connect_shapes.swift @@ -406,6 +406,7 @@ extension Connect { case enhancedContactMonitoring = "ENHANCED_CONTACT_MONITORING" case highVolumeOutbound = "HIGH_VOLUME_OUTBOUND" case inboundCalls = "INBOUND_CALLS" + case multiPartyChatConference = "MULTI_PARTY_CHAT_CONFERENCE" case multiPartyConference = "MULTI_PARTY_CONFERENCE" case outboundCalls = "OUTBOUND_CALLS" case useCustomTtsVoices = "USE_CUSTOM_TTS_VOICES" @@ -451,6 +452,7 @@ extension Connect { case application = "APPLICATION" case callTransferConnector = "CALL_TRANSFER_CONNECTOR" case casesDomain = "CASES_DOMAIN" + case cognitoUserPool = "COGNITO_USER_POOL" case event = "EVENT" case fileScanner = "FILE_SCANNER" case pinpointApp = "PINPOINT_APP" @@ -2763,15 +2765,18 @@ extension Connect { public let name: String? /// The proficiency level of the condition. public let proficiencyLevel: Float? + /// An Object to define the minimum and maximum proficiency levels. 
+ public let range: Range? /// The value of predefined attribute. public let value: String? @inlinable - public init(comparisonOperator: String? = nil, matchCriteria: MatchCriteria? = nil, name: String? = nil, proficiencyLevel: Float? = nil, value: String? = nil) { + public init(comparisonOperator: String? = nil, matchCriteria: MatchCriteria? = nil, name: String? = nil, proficiencyLevel: Float? = nil, range: Range? = nil, value: String? = nil) { self.comparisonOperator = comparisonOperator self.matchCriteria = matchCriteria self.name = name self.proficiencyLevel = proficiencyLevel + self.range = range self.value = value } @@ -2783,6 +2788,7 @@ extension Connect { try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.proficiencyLevel, name: "proficiencyLevel", parent: name, max: 5.0) try self.validate(self.proficiencyLevel, name: "proficiencyLevel", parent: name, min: 1.0) + try self.range?.validate(name: "\(name).range") try self.validate(self.value, name: "value", parent: name, max: 128) try self.validate(self.value, name: "value", parent: name, min: 1) } @@ -2792,6 +2798,7 @@ extension Connect { case matchCriteria = "MatchCriteria" case name = "Name" case proficiencyLevel = "ProficiencyLevel" + case range = "Range" case value = "Value" } } @@ -3563,6 +3570,8 @@ extension Connect { public let customer: Customer? /// The customer or external third party participant endpoint. public let customerEndpoint: EndpointInfo? + /// The customer's identification number. For example, the CustomerId may be a customer number from your CRM. You can create a Lambda function to pull the unique customer ID of the caller from your CRM system. If you enable Amazon Connect Voice ID capability, this attribute is populated with the CustomerSpeakerId of the caller. + public let customerId: String? /// Information about customer’s voice activity. public let customerVoiceActivity: CustomerVoiceActivity? /// The description of the contact. 
@@ -3617,7 +3626,7 @@ extension Connect { public let wisdomInfo: WisdomInfo? @inlinable - public init(additionalEmailRecipients: AdditionalEmailRecipients? = nil, agentInfo: AgentInfo? = nil, answeringMachineDetectionStatus: AnsweringMachineDetectionStatus? = nil, arn: String? = nil, campaign: Campaign? = nil, channel: Channel? = nil, connectedToSystemTimestamp: Date? = nil, contactAssociationId: String? = nil, customer: Customer? = nil, customerEndpoint: EndpointInfo? = nil, customerVoiceActivity: CustomerVoiceActivity? = nil, description: String? = nil, disconnectDetails: DisconnectDetails? = nil, disconnectTimestamp: Date? = nil, id: String? = nil, initialContactId: String? = nil, initiationMethod: ContactInitiationMethod? = nil, initiationTimestamp: Date? = nil, lastPausedTimestamp: Date? = nil, lastResumedTimestamp: Date? = nil, lastUpdateTimestamp: Date? = nil, name: String? = nil, previousContactId: String? = nil, qualityMetrics: QualityMetrics? = nil, queueInfo: QueueInfo? = nil, queuePriority: Int64? = nil, queueTimeAdjustmentSeconds: Int? = nil, relatedContactId: String? = nil, routingCriteria: RoutingCriteria? = nil, scheduledTimestamp: Date? = nil, segmentAttributes: [String: SegmentAttributeValue]? = nil, systemEndpoint: EndpointInfo? = nil, tags: [String: String]? = nil, totalPauseCount: Int? = nil, totalPauseDurationInSeconds: Int? = nil, wisdomInfo: WisdomInfo? = nil) { + public init(additionalEmailRecipients: AdditionalEmailRecipients? = nil, agentInfo: AgentInfo? = nil, answeringMachineDetectionStatus: AnsweringMachineDetectionStatus? = nil, arn: String? = nil, campaign: Campaign? = nil, channel: Channel? = nil, connectedToSystemTimestamp: Date? = nil, contactAssociationId: String? = nil, customer: Customer? = nil, customerEndpoint: EndpointInfo? = nil, customerId: String? = nil, customerVoiceActivity: CustomerVoiceActivity? = nil, description: String? = nil, disconnectDetails: DisconnectDetails? = nil, disconnectTimestamp: Date? 
= nil, id: String? = nil, initialContactId: String? = nil, initiationMethod: ContactInitiationMethod? = nil, initiationTimestamp: Date? = nil, lastPausedTimestamp: Date? = nil, lastResumedTimestamp: Date? = nil, lastUpdateTimestamp: Date? = nil, name: String? = nil, previousContactId: String? = nil, qualityMetrics: QualityMetrics? = nil, queueInfo: QueueInfo? = nil, queuePriority: Int64? = nil, queueTimeAdjustmentSeconds: Int? = nil, relatedContactId: String? = nil, routingCriteria: RoutingCriteria? = nil, scheduledTimestamp: Date? = nil, segmentAttributes: [String: SegmentAttributeValue]? = nil, systemEndpoint: EndpointInfo? = nil, tags: [String: String]? = nil, totalPauseCount: Int? = nil, totalPauseDurationInSeconds: Int? = nil, wisdomInfo: WisdomInfo? = nil) { self.additionalEmailRecipients = additionalEmailRecipients self.agentInfo = agentInfo self.answeringMachineDetectionStatus = answeringMachineDetectionStatus @@ -3628,6 +3637,7 @@ extension Connect { self.contactAssociationId = contactAssociationId self.customer = customer self.customerEndpoint = customerEndpoint + self.customerId = customerId self.customerVoiceActivity = customerVoiceActivity self.description = description self.disconnectDetails = disconnectDetails @@ -3667,6 +3677,7 @@ extension Connect { case contactAssociationId = "ContactAssociationId" case customer = "Customer" case customerEndpoint = "CustomerEndpoint" + case customerId = "CustomerId" case customerVoiceActivity = "CustomerVoiceActivity" case description = "Description" case disconnectDetails = "DisconnectDetails" @@ -10440,13 +10451,15 @@ extension Connect { public let andExpression: [Expression]? /// An object to specify the predefined attribute condition. public let attributeCondition: AttributeCondition? + public let notAttributeCondition: AttributeCondition? /// List of routing expressions which will be OR-ed together. public let orExpression: [Expression]? @inlinable - public init(andExpression: [Expression]? 
= nil, attributeCondition: AttributeCondition? = nil, orExpression: [Expression]? = nil) { + public init(andExpression: [Expression]? = nil, attributeCondition: AttributeCondition? = nil, notAttributeCondition: AttributeCondition? = nil, orExpression: [Expression]? = nil) { self.andExpression = andExpression self.attributeCondition = attributeCondition + self.notAttributeCondition = notAttributeCondition self.orExpression = orExpression } @@ -10455,6 +10468,7 @@ extension Connect { try $0.validate(name: "\(name).andExpression[]") } try self.attributeCondition?.validate(name: "\(name).attributeCondition") + try self.notAttributeCondition?.validate(name: "\(name).notAttributeCondition") try self.orExpression?.forEach { try $0.validate(name: "\(name).orExpression[]") } @@ -10463,6 +10477,7 @@ extension Connect { private enum CodingKeys: String, CodingKey { case andExpression = "AndExpression" case attributeCondition = "AttributeCondition" + case notAttributeCondition = "NotAttributeCondition" case orExpression = "OrExpression" } } @@ -16759,6 +16774,31 @@ extension Connect { } } + public struct Range: AWSEncodableShape & AWSDecodableShape { + /// The maximum proficiency level of the range. + public let maxProficiencyLevel: Float? + /// The minimum proficiency level of the range. + public let minProficiencyLevel: Float? + + @inlinable + public init(maxProficiencyLevel: Float? = nil, minProficiencyLevel: Float? 
= nil) { + self.maxProficiencyLevel = maxProficiencyLevel + self.minProficiencyLevel = minProficiencyLevel + } + + public func validate(name: String) throws { + try self.validate(self.maxProficiencyLevel, name: "maxProficiencyLevel", parent: name, max: 5.0) + try self.validate(self.maxProficiencyLevel, name: "maxProficiencyLevel", parent: name, min: 1.0) + try self.validate(self.minProficiencyLevel, name: "minProficiencyLevel", parent: name, max: 5.0) + try self.validate(self.minProficiencyLevel, name: "minProficiencyLevel", parent: name, min: 1.0) + } + + private enum CodingKeys: String, CodingKey { + case maxProficiencyLevel = "MaxProficiencyLevel" + case minProficiencyLevel = "MinProficiencyLevel" + } + } + public struct ReadOnlyFieldInfo: AWSEncodableShape & AWSDecodableShape { /// Identifier of the read-only field. public let id: TaskTemplateFieldIdentifier? @@ -19770,7 +19810,7 @@ extension Connect { public let fileId: String? /// The current status of the attached file. public let fileStatus: FileStatusType? - /// Information to be used while uploading the attached file. + /// The headers to be provided while uploading the file to the URL. public let uploadUrlMetadata: UploadUrlMetadata? @inlinable @@ -19802,6 +19842,8 @@ extension Connect { public let clientToken: String? /// The identifier of the flow for initiating the chat. To see the ContactFlowId in the Amazon Connect admin website, on the navigation menu go to Routing, Flows. Choose the flow. On the flow page, under the name of the flow, choose Show additional flow information. The ContactFlowId is the last part of the ARN, shown here in bold: arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx public let contactFlowId: String + /// The customer's identification number. For example, the CustomerId may be a customer number from your CRM. + public let customerId: String? 
/// The initial message to be sent to the newly created chat. If you have a Lex bot in your flow, the initial message is not delivered to the Lex bot. public let initialMessage: ChatMessage? /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. @@ -19818,11 +19860,12 @@ extension Connect { public let supportedMessagingContentTypes: [String]? @inlinable - public init(attributes: [String: String]? = nil, chatDurationInMinutes: Int? = nil, clientToken: String? = StartChatContactRequest.idempotencyToken(), contactFlowId: String, initialMessage: ChatMessage? = nil, instanceId: String, participantDetails: ParticipantDetails, persistentChat: PersistentChat? = nil, relatedContactId: String? = nil, segmentAttributes: [String: SegmentAttributeValue]? = nil, supportedMessagingContentTypes: [String]? = nil) { + public init(attributes: [String: String]? = nil, chatDurationInMinutes: Int? = nil, clientToken: String? = StartChatContactRequest.idempotencyToken(), contactFlowId: String, customerId: String? = nil, initialMessage: ChatMessage? = nil, instanceId: String, participantDetails: ParticipantDetails, persistentChat: PersistentChat? = nil, relatedContactId: String? = nil, segmentAttributes: [String: SegmentAttributeValue]? = nil, supportedMessagingContentTypes: [String]? 
= nil) { self.attributes = attributes self.chatDurationInMinutes = chatDurationInMinutes self.clientToken = clientToken self.contactFlowId = contactFlowId + self.customerId = customerId self.initialMessage = initialMessage self.instanceId = instanceId self.participantDetails = participantDetails @@ -19842,6 +19885,8 @@ extension Connect { try self.validate(self.chatDurationInMinutes, name: "chatDurationInMinutes", parent: name, min: 60) try self.validate(self.clientToken, name: "clientToken", parent: name, max: 500) try self.validate(self.contactFlowId, name: "contactFlowId", parent: name, max: 500) + try self.validate(self.customerId, name: "customerId", parent: name, max: 128) + try self.validate(self.customerId, name: "customerId", parent: name, min: 1) try self.initialMessage?.validate(name: "\(name).initialMessage") try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) @@ -19865,6 +19910,7 @@ extension Connect { case chatDurationInMinutes = "ChatDurationInMinutes" case clientToken = "ClientToken" case contactFlowId = "ContactFlowId" + case customerId = "CustomerId" case initialMessage = "InitialMessage" case instanceId = "InstanceId" case participantDetails = "ParticipantDetails" @@ -22760,6 +22806,55 @@ extension Connect { } } + public struct UpdateParticipantAuthenticationRequest: AWSEncodableShape { + /// The code query parameter provided by Cognito in the redirectUri. + public let code: String? + /// The error query parameter provided by Cognito in the redirectUri. + public let error: String? + /// The error_description parameter provided by Cognito in the redirectUri. + public let errorDescription: String? + /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + public let instanceId: String + /// The state query parameter that was provided by Cognito in the redirectUri. 
This will also match the state parameter provided in the AuthenticationUrl from the GetAuthenticationUrl response. + public let state: String + + @inlinable + public init(code: String? = nil, error: String? = nil, errorDescription: String? = nil, instanceId: String, state: String) { + self.code = code + self.error = error + self.errorDescription = errorDescription + self.instanceId = instanceId + self.state = state + } + + public func validate(name: String) throws { + try self.validate(self.code, name: "code", parent: name, max: 2048) + try self.validate(self.code, name: "code", parent: name, min: 1) + try self.validate(self.error, name: "error", parent: name, max: 2048) + try self.validate(self.error, name: "error", parent: name, min: 1) + try self.validate(self.error, name: "error", parent: name, pattern: "^[\\x20-\\x21\\x23-\\x5B\\x5D-\\x7E]*$") + try self.validate(self.errorDescription, name: "errorDescription", parent: name, max: 2048) + try self.validate(self.errorDescription, name: "errorDescription", parent: name, min: 1) + try self.validate(self.errorDescription, name: "errorDescription", parent: name, pattern: "^[\\x20-\\x21\\x23-\\x5B\\x5D-\\x7E]*$") + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.validate(self.state, name: "state", parent: name, max: 1000) + try self.validate(self.state, name: "state", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case code = "Code" + case error = "Error" + case errorDescription = "ErrorDescription" + case instanceId = "InstanceId" + case state = "State" + } + } + + public struct UpdateParticipantAuthenticationResponse: AWSDecodableShape { + public init() {} + } + public struct UpdateParticipantRoleConfigRequest: AWSEncodableShape { /// The Amazon Connect channel you want to configure. 
public let channelConfiguration: UpdateParticipantRoleConfigChannelInfo @@ -25020,7 +25115,7 @@ extension Connect { } public struct VoiceRecordingConfiguration: AWSEncodableShape { - /// Identifies which IVR track is being recorded. + /// Identifies which IVR track is being recorded. One and only one of the track configurations should be presented in the request. public let ivrRecordingTrack: IvrRecordingTrack? /// Identifies which track is being recorded. public let voiceRecordingTrack: VoiceRecordingTrack? diff --git a/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_api.swift b/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_api.swift index e230ec3a8f..0cf20d8289 100644 --- a/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_api.swift +++ b/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS ConnectParticipant service. /// -/// Amazon Connect is an easy-to-use omnichannel cloud contact center service that enables companies of any size to deliver superior customer service at a lower cost. Amazon Connect communications capabilities make it easy for companies to deliver personalized interactions across communication channels, including chat. Use the Amazon Connect Participant Service to manage participants (for example, agents, customers, and managers listening in), and to send messages and events within a chat contact. The APIs in the service enable the following: sending chat messages, attachment sharing, managing a participant's connection state and message events, and retrieving chat transcripts. +/// Participant Service actions Participant Service data types Amazon Connect is an easy-to-use omnichannel cloud contact center service that enables companies of any size to deliver superior customer service at a lower cost. 
Amazon Connect communications capabilities make it easy for companies to deliver personalized interactions across communication channels, including chat. Use the Amazon Connect Participant Service to manage participants (for example, agents, customers, and managers listening in), and to send messages and events within a chat contact. The APIs in the service enable the following: sending chat messages, attachment sharing, managing a participant's connection state and message events, and retrieving chat transcripts. public struct ConnectParticipant: AWSService { // MARK: Member variables @@ -89,7 +89,39 @@ public struct ConnectParticipant: AWSService { // MARK: API Calls - /// Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Cancels the authentication session. The opted out branch of the Authenticate Customer flow block will be taken. The current supported channel is chat. This API is not supported for Apple Messages for Business, WhatsApp, or SMS chats. + @Sendable + @inlinable + public func cancelParticipantAuthentication(_ input: CancelParticipantAuthenticationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CancelParticipantAuthenticationResponse { + try await self.client.execute( + operation: "CancelParticipantAuthentication", + path: "/participant/cancel-authentication", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Cancels the authentication session. The opted out branch of the Authenticate Customer flow block will be taken. The current supported channel is chat. This API is not supported for Apple Messages for Business, WhatsApp, or SMS chats. 
+ /// + /// Parameters: + /// - connectionToken: The authentication token associated with the participant's connection. + /// - sessionId: The sessionId provided in the authenticationInitiated event. + /// - logger: Logger use during operation + @inlinable + public func cancelParticipantAuthentication( + connectionToken: String, + sessionId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CancelParticipantAuthenticationResponse { + let input = CancelParticipantAuthenticationRequest( + connectionToken: connectionToken, + sessionId: sessionId + ) + return try await self.cancelParticipantAuthentication(input, logger: logger) + } + + /// Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func completeAttachmentUpload(_ input: CompleteAttachmentUploadRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CompleteAttachmentUploadResponse { @@ -102,7 +134,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. 
A conflict exception is thrown when an attachment with that identifier is already being uploaded. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - attachmentIds: A list of unique identifiers for the attachments. @@ -124,7 +156,7 @@ public struct ConnectParticipant: AWSService { return try await self.completeAttachmentUpload(input, logger: logger) } - /// Creates the participant's connection. ParticipantToken is used for invoking this API instead of ConnectionToken. The participant token is valid for the lifetime of the participant – until they are part of a contact. The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic. For chat, you need to publish the following on the established websocket connection: {"topic":"aws/subscribe","content":{"topics":["aws/chat"]}} Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before. Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect Administrator Guide. Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Creates the participant's connection. 
For security recommendations, see Amazon Connect Chat security best practices. ParticipantToken is used for invoking this API instead of ConnectionToken. The participant token is valid for the lifetime of the participant – until they are part of a contact. The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic. For chat, you need to publish the following on the established websocket connection: {"topic":"aws/subscribe","content":{"topics":["aws/chat"]}} Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before. Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect Administrator Guide. Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func createParticipantConnection(_ input: CreateParticipantConnectionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateParticipantConnectionResponse { @@ -137,7 +169,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Creates the participant's connection. ParticipantToken is used for invoking this API instead of ConnectionToken. The participant token is valid for the lifetime of the participant – until they are part of a contact. The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. 
Clients must manually connect to the returned websocket URL and subscribe to the desired topic. For chat, you need to publish the following on the established websocket connection: {"topic":"aws/subscribe","content":{"topics":["aws/chat"]}} Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before. Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect Administrator Guide. Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Creates the participant's connection. For security recommendations, see Amazon Connect Chat security best practices. ParticipantToken is used for invoking this API instead of ConnectionToken. The participant token is valid for the lifetime of the participant – until they are part of a contact. The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic. For chat, you need to publish the following on the established websocket connection: {"topic":"aws/subscribe","content":{"topics":["aws/chat"]}} Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before. 
Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect Administrator Guide. Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - connectParticipant: Amazon Connect Participant is used to mark the participant as connected for customer participant in message streaming, as well as for agent or manager participant in non-streaming chats. @@ -159,7 +191,7 @@ public struct ConnectParticipant: AWSService { return try await self.createParticipantConnection(input, logger: logger) } - /// Retrieves the view for the specified view token. + /// Retrieves the view for the specified view token. For security recommendations, see Amazon Connect Chat security best practices. @Sendable @inlinable public func describeView(_ input: DescribeViewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeViewResponse { @@ -172,7 +204,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Retrieves the view for the specified view token. + /// Retrieves the view for the specified view token. For security recommendations, see Amazon Connect Chat security best practices. /// /// Parameters: /// - connectionToken: The connection token. @@ -191,7 +223,7 @@ public struct ConnectParticipant: AWSService { return try await self.describeView(input, logger: logger) } - /// Disconnects a participant. ConnectionToken is used for invoking this API instead of ParticipantToken. 
The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Disconnects a participant. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func disconnectParticipant(_ input: DisconnectParticipantRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisconnectParticipantResponse { @@ -204,7 +236,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Disconnects a participant. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Disconnects a participant. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. @@ -223,7 +255,7 @@ public struct ConnectParticipant: AWSService { return try await self.disconnectParticipant(input, logger: logger) } - /// Provides a pre-signed URL for download of a completed attachment. This is an asynchronous API for use with active contacts. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Provides a pre-signed URL for download of a completed attachment. This is an asynchronous API for use with active contacts. 
For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func getAttachment(_ input: GetAttachmentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAttachmentResponse { @@ -236,26 +268,64 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Provides a pre-signed URL for download of a completed attachment. This is an asynchronous API for use with active contacts. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Provides a pre-signed URL for download of a completed attachment. This is an asynchronous API for use with active contacts. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - attachmentId: A unique identifier for the attachment. /// - connectionToken: The authentication token associated with the participant's connection. + /// - urlExpiryInSeconds: The expiration time of the URL in ISO timestamp. It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z. /// - logger: Logger use during operation @inlinable public func getAttachment( attachmentId: String, connectionToken: String, + urlExpiryInSeconds: Int? 
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> GetAttachmentResponse { let input = GetAttachmentRequest( attachmentId: attachmentId, - connectionToken: connectionToken + connectionToken: connectionToken, + urlExpiryInSeconds: urlExpiryInSeconds ) return try await self.getAttachment(input, logger: logger) } - /// Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat. If you have a process that consumes events in the transcript of an chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session: application/vnd.amazonaws.connect.event.participant.left application/vnd.amazonaws.connect.event.participant.joined application/vnd.amazonaws.connect.event.chat.ended application/vnd.amazonaws.connect.event.transfer.succeeded application/vnd.amazonaws.connect.event.transfer.failed ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Retrieves the AuthenticationUrl for the current authentication session for the AuthenticateCustomer flow block. For security recommendations, see Amazon Connect Chat security best practices. This API can only be called within one minute of receiving the authenticationInitiated event. The current supported channel is chat. This API is not supported for Apple Messages for Business, WhatsApp, or SMS chats. 
+ @Sendable + @inlinable + public func getAuthenticationUrl(_ input: GetAuthenticationUrlRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAuthenticationUrlResponse { + try await self.client.execute( + operation: "GetAuthenticationUrl", + path: "/participant/authentication-url", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves the AuthenticationUrl for the current authentication session for the AuthenticateCustomer flow block. For security recommendations, see Amazon Connect Chat security best practices. This API can only be called within one minute of receiving the authenticationInitiated event. The current supported channel is chat. This API is not supported for Apple Messages for Business, WhatsApp, or SMS chats. + /// + /// Parameters: + /// - connectionToken: The authentication token associated with the participant's connection. + /// - redirectUri: The URL where the customer will be redirected after Amazon Cognito authorizes the user. + /// - sessionId: The sessionId provided in the authenticationInitiated event. + /// - logger: Logger use during operation + @inlinable + public func getAuthenticationUrl( + connectionToken: String, + redirectUri: String, + sessionId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetAuthenticationUrlResponse { + let input = GetAuthenticationUrlRequest( + connectionToken: connectionToken, + redirectUri: redirectUri, + sessionId: sessionId + ) + return try await self.getAuthenticationUrl(input, logger: logger) + } + + /// Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat. For security recommendations, see Amazon Connect Chat security best practices. 
If you have a process that consumes events in the transcript of a chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session: application/vnd.amazonaws.connect.event.participant.left application/vnd.amazonaws.connect.event.participant.joined application/vnd.amazonaws.connect.event.chat.ended application/vnd.amazonaws.connect.event.transfer.succeeded application/vnd.amazonaws.connect.event.transfer.failed ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func getTranscript(_ input: GetTranscriptRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTranscriptResponse { @@ -268,7 +338,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat. If you have a process that consumes events in the transcript of an chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session: application/vnd.amazonaws.connect.event.participant.left application/vnd.amazonaws.connect.event.participant.joined application/vnd.amazonaws.connect.event.chat.ended application/vnd.amazonaws.connect.event.transfer.succeeded application/vnd.amazonaws.connect.event.transfer.failed ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat. 
For security recommendations, see Amazon Connect Chat security best practices. If you have a process that consumes events in the transcript of a chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session: application/vnd.amazonaws.connect.event.participant.left application/vnd.amazonaws.connect.event.participant.joined application/vnd.amazonaws.connect.event.chat.ended application/vnd.amazonaws.connect.event.transfer.succeeded application/vnd.amazonaws.connect.event.transfer.failed ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - connectionToken: The authentication token associated with the participant's connection. @@ -302,7 +372,7 @@ public struct ConnectParticipant: AWSService { return try await self.getTranscript(input, logger: logger) } - /// The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant field. Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor is barged-in will result in a conflict exception. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant field. Sends an event. Message receipts are not supported when there are more than two active participants in the chat. 
Using the SendEvent API for message receipts when a supervisor is barged-in will result in a conflict exception. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func sendEvent(_ input: SendEventRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SendEventResponse { @@ -315,7 +385,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant field. Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor is barged-in will result in a conflict exception. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant field. Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor is barged-in will result in a conflict exception. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. 
/// /// Parameters: /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. @@ -340,7 +410,7 @@ public struct ConnectParticipant: AWSService { return try await self.sendEvent(input, logger: logger) } - /// Sends a message. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Sends a message. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func sendMessage(_ input: SendMessageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SendMessageResponse { @@ -353,7 +423,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Sends a message. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Sends a message. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. 
@@ -378,7 +448,7 @@ public struct ConnectParticipant: AWSService { return try await self.sendMessage(input, logger: logger) } - /// Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. @Sendable @inlinable public func startAttachmentUpload(_ input: StartAttachmentUploadRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartAttachmentUploadResponse { @@ -391,7 +461,7 @@ public struct ConnectParticipant: AWSService { logger: logger ) } - /// Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. + /// Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3. For security recommendations, see Amazon Connect Chat security best practices. ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. /// /// Parameters: /// - attachmentName: A case-sensitive name of the attachment being uploaded. 
diff --git a/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_shapes.swift b/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_shapes.swift index bc686afc12..a2fe0f9cca 100644 --- a/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_shapes.swift +++ b/Sources/Soto/Services/ConnectParticipant/ConnectParticipant_shapes.swift @@ -104,6 +104,41 @@ extension ConnectParticipant { } } + public struct CancelParticipantAuthenticationRequest: AWSEncodableShape { + /// The authentication token associated with the participant's connection. + public let connectionToken: String + /// The sessionId provided in the authenticationInitiated event. + public let sessionId: String + + @inlinable + public init(connectionToken: String, sessionId: String) { + self.connectionToken = connectionToken + self.sessionId = sessionId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.connectionToken, key: "X-Amz-Bearer") + try container.encode(self.sessionId, forKey: .sessionId) + } + + public func validate(name: String) throws { + try self.validate(self.connectionToken, name: "connectionToken", parent: name, max: 1000) + try self.validate(self.connectionToken, name: "connectionToken", parent: name, min: 1) + try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36) + try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36) + } + + private enum CodingKeys: String, CodingKey { + case sessionId = "SessionId" + } + } + + public struct CancelParticipantAuthenticationResponse: AWSDecodableShape { + public init() {} + } + public struct CompleteAttachmentUploadRequest: AWSEncodableShape { /// A list of unique identifiers for the attachments. 
public let attachmentIds: [String] @@ -303,11 +338,14 @@ extension ConnectParticipant { public let attachmentId: String /// The authentication token associated with the participant's connection. public let connectionToken: String + /// The expiration time of the URL, in seconds. Valid range: 5 to 300 seconds. + public let urlExpiryInSeconds: Int? @inlinable - public init(attachmentId: String, connectionToken: String) { + public init(attachmentId: String, connectionToken: String, urlExpiryInSeconds: Int? = nil) { self.attachmentId = attachmentId self.connectionToken = connectionToken + self.urlExpiryInSeconds = urlExpiryInSeconds } public func encode(to encoder: Encoder) throws { @@ -315,6 +353,7 @@ extension ConnectParticipant { var container = encoder.container(keyedBy: CodingKeys.self) try container.encode(self.attachmentId, forKey: .attachmentId) request.encodeHeader(self.connectionToken, key: "X-Amz-Bearer") + try container.encodeIfPresent(self.urlExpiryInSeconds, forKey: .urlExpiryInSeconds) } public func validate(name: String) throws { @@ -322,14 +361,19 @@ extension ConnectParticipant { try self.validate(self.attachmentId, name: "attachmentId", parent: name, min: 1) try self.validate(self.connectionToken, name: "connectionToken", parent: name, max: 1000) try self.validate(self.connectionToken, name: "connectionToken", parent: name, min: 1) + try self.validate(self.urlExpiryInSeconds, name: "urlExpiryInSeconds", parent: name, max: 300) + try self.validate(self.urlExpiryInSeconds, name: "urlExpiryInSeconds", parent: name, min: 5) } private enum CodingKeys: String, CodingKey { case attachmentId = "AttachmentId" + case urlExpiryInSeconds = "UrlExpiryInSeconds" } } public struct GetAttachmentResponse: AWSDecodableShape { + /// The size of the attachment in bytes. 
+ public let attachmentSizeInBytes: Int64 /// This is the pre-signed URL that can be used for uploading the file to Amazon S3 when used in response /// to StartAttachmentUpload. public let url: String? @@ -337,17 +381,71 @@ extension ConnectParticipant { public let urlExpiry: String? @inlinable - public init(url: String? = nil, urlExpiry: String? = nil) { + public init(attachmentSizeInBytes: Int64, url: String? = nil, urlExpiry: String? = nil) { + self.attachmentSizeInBytes = attachmentSizeInBytes self.url = url self.urlExpiry = urlExpiry } private enum CodingKeys: String, CodingKey { + case attachmentSizeInBytes = "AttachmentSizeInBytes" case url = "Url" case urlExpiry = "UrlExpiry" } } + public struct GetAuthenticationUrlRequest: AWSEncodableShape { + /// The authentication token associated with the participant's connection. + public let connectionToken: String + /// The URL where the customer will be redirected after Amazon Cognito authorizes the user. + public let redirectUri: String + /// The sessionId provided in the authenticationInitiated event. + public let sessionId: String + + @inlinable + public init(connectionToken: String, redirectUri: String, sessionId: String) { + self.connectionToken = connectionToken + self.redirectUri = redirectUri + self.sessionId = sessionId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeHeader(self.connectionToken, key: "X-Amz-Bearer") + try container.encode(self.redirectUri, forKey: .redirectUri) + try container.encode(self.sessionId, forKey: .sessionId) + } + + public func validate(name: String) throws { + try self.validate(self.connectionToken, name: "connectionToken", parent: name, max: 1000) + try self.validate(self.connectionToken, name: "connectionToken", parent: name, min: 1) + try self.validate(self.redirectUri, name: "redirectUri", parent: name, max: 1024) + try self.validate(self.redirectUri, name: "redirectUri", parent: name, min: 1) + try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36) + try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36) + } + + private enum CodingKeys: String, CodingKey { + case redirectUri = "RedirectUri" + case sessionId = "SessionId" + } + } + + public struct GetAuthenticationUrlResponse: AWSDecodableShape { + /// The URL where the customer will sign in to the identity provider. This URL contains the authorize endpoint for the Cognito UserPool used in the authentication. + public let authenticationUrl: String? + + @inlinable + public init(authenticationUrl: String? = nil) { + self.authenticationUrl = authenticationUrl + } + + private enum CodingKeys: String, CodingKey { + case authenticationUrl = "AuthenticationUrl" + } + } + public struct GetTranscriptRequest: AWSEncodableShape { /// The authentication token associated with the participant's connection. public let connectionToken: String @@ -707,7 +805,7 @@ extension ConnectParticipant { public struct StartAttachmentUploadResponse: AWSDecodableShape { /// A unique identifier for the attachment. public let attachmentId: String? - /// Fields to be used while uploading the attachment. + /// The headers to be provided while uploading the file to the URL. public let uploadMetadata: UploadMetadata? 
@inlinable diff --git a/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift b/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift index a6b144d255..9866a24905 100644 --- a/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift +++ b/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift @@ -517,6 +517,7 @@ public struct CostExplorer: AWSService { /// Retrieves cost and usage metrics for your account. You can specify which cost and usage-related metric that you want the request to return. For example, you can specify BlendedCosts or UsageQuantity. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Management account in an organization in Organizations have access to all member accounts. For information about filter limitations, see Quotas and restrictions in the Billing and Cost Management User Guide. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - filter: Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression. Valid values for MatchOptions for Dimensions are EQUALS and CASE_SENSITIVE. Valid values for MatchOptions for CostCategories and Tags are EQUALS, ABSENT, and CASE_SENSITIVE. Default values are EQUALS and CASE_SENSITIVE. 
/// - granularity: Sets the Amazon Web Services cost granularity to MONTHLY or DAILY, or HOURLY. If Granularity isn't set, the response object doesn't include the Granularity, either MONTHLY or DAILY, or HOURLY. /// - groupBy: You can group Amazon Web Services costs using up to two different groups, either dimensions, tag keys, cost categories, or any two group by types. Valid values for the DIMENSION type are AZ, INSTANCE_TYPE, LEGAL_ENTITY_NAME, INVOICING_ENTITY, LINKED_ACCOUNT, OPERATION, PLATFORM, PURCHASE_TYPE, SERVICE, TENANCY, RECORD_TYPE, and USAGE_TYPE. When you group by the TAG type and include a valid tag key, you get all tag values, including empty strings. @@ -526,6 +527,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getCostAndUsage( + billingViewArn: String? = nil, filter: Expression? = nil, granularity: Granularity, groupBy: [GroupDefinition]? = nil, @@ -535,6 +537,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetCostAndUsageResponse { let input = GetCostAndUsageRequest( + billingViewArn: billingViewArn, filter: filter, granularity: granularity, groupBy: groupBy, @@ -561,6 +564,7 @@ public struct CostExplorer: AWSService { /// Retrieves cost and usage metrics with resources for your account. You can specify which cost and usage-related metric, such as BlendedCosts or UsageQuantity, that you want the request to return. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Management account in an organization in Organizations have access to all member accounts. Hourly granularity is only available for EC2-Instances (Elastic Compute Cloud) resource-level data. All other resource-level data is available at daily granularity. This is an opt-in only feature. 
You can enable this feature from the Cost Explorer Settings page. For information about how to access the Settings page, see Controlling Access for Cost Explorer in the Billing and Cost Management User Guide. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - filter: Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression. The GetCostAndUsageWithResources operation requires that you either group by or filter by a ResourceId. It requires the Expression "SERVICE = Amazon Elastic Compute Cloud - Compute" in the filter. Valid values for MatchOptions for Dimensions are EQUALS and CASE_SENSITIVE. Valid values for MatchOptions for CostCategories and Tags are EQUALS, ABSENT, and CASE_SENSITIVE. Default values are EQUALS and CASE_SENSITIVE. /// - granularity: Sets the Amazon Web Services cost granularity to MONTHLY, DAILY, or HOURLY. If Granularity isn't set, the response object doesn't include the Granularity, MONTHLY, DAILY, or HOURLY. /// - groupBy: You can group Amazon Web Services costs using up to two different groups: DIMENSION, TAG, COST_CATEGORY. @@ -570,6 +574,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getCostAndUsageWithResources( + billingViewArn: String? = nil, filter: Expression, granularity: Granularity, groupBy: [GroupDefinition]? 
= nil, @@ -579,6 +584,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetCostAndUsageWithResourcesResponse { let input = GetCostAndUsageWithResourcesRequest( + billingViewArn: billingViewArn, filter: filter, granularity: granularity, groupBy: groupBy, @@ -605,6 +611,7 @@ public struct CostExplorer: AWSService { /// Retrieves an array of Cost Category names and values incurred cost. If some Cost Category names and values are not associated with any cost, they will not be returned by this API. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - costCategoryName: /// - filter: /// - maxResults: This field is only used when the SortBy value is provided in the request. The maximum number of objects that are returned for this request. If MaxResults isn't specified with the SortBy value, the request returns 1000 results as the default value for this parameter. For GetCostCategories, MaxResults has an upper quota of 1000. @@ -615,6 +622,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getCostCategories( + billingViewArn: String? = nil, costCategoryName: String? = nil, filter: Expression? = nil, maxResults: Int? 
= nil, @@ -625,6 +633,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetCostCategoriesResponse { let input = GetCostCategoriesRequest( + billingViewArn: billingViewArn, costCategoryName: costCategoryName, filter: filter, maxResults: maxResults, @@ -652,6 +661,7 @@ public struct CostExplorer: AWSService { /// Retrieves a forecast for how much Amazon Web Services predicts that you will spend over the forecast time period that you select, based on your past costs. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - filter: The filters that you want to use to filter your forecast. The GetCostForecast API supports filtering by the following dimensions: AZ INSTANCE_TYPE LINKED_ACCOUNT LINKED_ACCOUNT_NAME OPERATION PURCHASE_TYPE REGION SERVICE USAGE_TYPE USAGE_TYPE_GROUP RECORD_TYPE OPERATING_SYSTEM TENANCY SCOPE PLATFORM SUBSCRIPTION_ID LEGAL_ENTITY_NAME DEPLOYMENT_OPTION DATABASE_ENGINE INSTANCE_TYPE_FAMILY BILLING_ENTITY RESERVATION_ID SAVINGS_PLAN_ARN /// - granularity: How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 12 months of MONTHLY forecasts. The GetCostForecast operation supports only DAILY and MONTHLY granularities. /// - metric: Which metric Cost Explorer uses to create your forecast. For more information about blended and unblended rates, see Why does the "blended" annotation appear on some line items in my bill?. 
Valid values for a GetCostForecast call are the following: AMORTIZED_COST BLENDED_COST NET_AMORTIZED_COST NET_UNBLENDED_COST UNBLENDED_COST @@ -660,6 +670,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getCostForecast( + billingViewArn: String? = nil, filter: Expression? = nil, granularity: Granularity, metric: Metric, @@ -668,6 +679,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetCostForecastResponse { let input = GetCostForecastRequest( + billingViewArn: billingViewArn, filter: filter, granularity: granularity, metric: metric, @@ -693,6 +705,7 @@ public struct CostExplorer: AWSService { /// Retrieves all available filter values for a specified filter over a period of time. You can search the dimension values for an arbitrary string. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - context: The context for the call to GetDimensionValues. This can be RESERVATIONS or COST_AND_USAGE. The default value is COST_AND_USAGE. If the context is set to RESERVATIONS, the resulting dimension values can be used in the GetReservationUtilization operation. If the context is set to COST_AND_USAGE, the resulting dimension values can be used in the GetCostAndUsage operation. If you set the context to COST_AND_USAGE, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. BILLING_ENTITY - The Amazon Web Services seller that your account is with. 
Possible values are the following: - Amazon Web Services(Amazon Web Services): The entity that sells Amazon Web Services services. - AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that's an acting reseller for Amazon Web Services services in India. - Amazon Web Services Marketplace: The entity that supports the sale of solutions that are built on Amazon Web Services by third-party software providers. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use cases. Examples are Compute Optimized (for example, C4, C5, C6g, and C7g), Memory Optimization (for example, R4, R5n, R5b, and R6g). INVOICING_ENTITY - The name of the entity that issues the Amazon Web Services invoice. LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. OPERATING_SYSTEM - The operating system. Examples are Windows or Linux. OPERATION - The action performed. Examples include RunInstance and CreateBucket. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. PURCHASE_TYPE - The reservation type of the purchase that this usage is related to. Examples include On-Demand Instances and Standard Reserved Instances. RESERVATION_ID - The unique identifier for an Amazon Web Services Reservation Instance. SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. 
SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute). SERVICE - The Amazon Web Services service such as Amazon DynamoDB. TENANCY - The tenancy of a resource. Examples are shared or dedicated. USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues operation includes a unit attribute. Examples include GB and Hrs. USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this operation includes a unit attribute. REGION - The Amazon Web Services Region. RECORD_TYPE - The different types of charges such as Reserved Instance (RI) fees, usage costs, tax refunds, and credits. RESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service. If you set the context to RESERVATIONS, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. REGION - The Amazon Web Services Region. SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone. TAG (Coverage only) - The tags that are associated with a Reserved Instance (RI). TENANCY - The tenancy of a resource. Examples are shared or dedicated. 
If you set the context to SAVINGS_PLANS, you can use the following dimensions for searching: SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute) PAYMENT_OPTION - The payment option for the given Savings Plans (for example, All Upfront) REGION - The Amazon Web Services Region. INSTANCE_TYPE_FAMILY - The family of instances (For example, m5) LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. /// - dimension: The name of the dimension. Each Dimension is available for a different Context. For more information, see Context. LINK_ACCOUNT_NAME and SERVICE_CODE can only be used in CostCategoryRule. /// - filter: @@ -704,6 +717,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getDimensionValues( + billingViewArn: String? = nil, context: Context? = nil, dimension: Dimension, filter: Expression? = nil, @@ -715,6 +729,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetDimensionValuesResponse { let input = GetDimensionValuesRequest( + billingViewArn: billingViewArn, context: context, dimension: dimension, filter: filter, @@ -1148,6 +1163,7 @@ public struct CostExplorer: AWSService { /// Queries for available tag keys and tag values for a specified period. You can search the tag values for an arbitrary string. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. 
/// - filter: /// - maxResults: This field is only used when SortBy is provided in the request. The maximum number of objects that are returned for this request. If MaxResults isn't specified with SortBy, the request returns 1000 results as the default value for this parameter. For GetTags, MaxResults has an upper quota of 1000. /// - nextPageToken: The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size. @@ -1158,6 +1174,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getTags( + billingViewArn: String? = nil, filter: Expression? = nil, maxResults: Int? = nil, nextPageToken: String? = nil, @@ -1168,6 +1185,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetTagsResponse { let input = GetTagsRequest( + billingViewArn: billingViewArn, filter: filter, maxResults: maxResults, nextPageToken: nextPageToken, @@ -1195,6 +1213,7 @@ public struct CostExplorer: AWSService { /// Retrieves a forecast for how much Amazon Web Services predicts that you will use over the forecast time period that you select, based on your past usage. /// /// Parameters: + /// - billingViewArn: The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. /// - filter: The filters that you want to use to filter your forecast. 
The GetUsageForecast API supports filtering by the following dimensions: AZ INSTANCE_TYPE LINKED_ACCOUNT LINKED_ACCOUNT_NAME OPERATION PURCHASE_TYPE REGION SERVICE USAGE_TYPE USAGE_TYPE_GROUP RECORD_TYPE OPERATING_SYSTEM TENANCY SCOPE PLATFORM SUBSCRIPTION_ID LEGAL_ENTITY_NAME DEPLOYMENT_OPTION DATABASE_ENGINE INSTANCE_TYPE_FAMILY BILLING_ENTITY RESERVATION_ID SAVINGS_PLAN_ARN /// - granularity: How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 12 months of MONTHLY forecasts. The GetUsageForecast operation supports only DAILY and MONTHLY granularities. /// - metric: Which metric Cost Explorer uses to create your forecast. Valid values for a GetUsageForecast call are the following: USAGE_QUANTITY NORMALIZED_USAGE_AMOUNT @@ -1203,6 +1222,7 @@ public struct CostExplorer: AWSService { /// - logger: Logger use during operation @inlinable public func getUsageForecast( + billingViewArn: String? = nil, filter: Expression? = nil, granularity: Granularity, metric: Metric, @@ -1211,6 +1231,7 @@ public struct CostExplorer: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetUsageForecastResponse { let input = GetUsageForecastRequest( + billingViewArn: billingViewArn, filter: filter, granularity: granularity, metric: metric, diff --git a/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift b/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift index 7c90d7f69b..d208c440c0 100644 --- a/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift +++ b/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift @@ -2152,6 +2152,8 @@ extension CostExplorer { } public struct GetCostAndUsageRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. 
The BillingViewArn can be retrieved by calling the ListBillingViews API. + public let billingViewArn: String? /// Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression. Valid values for MatchOptions for Dimensions are EQUALS and CASE_SENSITIVE. Valid values for MatchOptions for CostCategories and Tags are EQUALS, ABSENT, and CASE_SENSITIVE. Default values are EQUALS and CASE_SENSITIVE. public let filter: Expression? /// Sets the Amazon Web Services cost granularity to MONTHLY or DAILY, or HOURLY. If Granularity isn't set, the response object doesn't include the Granularity, either MONTHLY or DAILY, or HOURLY. @@ -2166,7 +2168,8 @@ extension CostExplorer { public let timePeriod: DateInterval @inlinable - public init(filter: Expression? = nil, granularity: Granularity, groupBy: [GroupDefinition]? = nil, metrics: [String], nextPageToken: String? = nil, timePeriod: DateInterval) { + public init(billingViewArn: String? = nil, filter: Expression? = nil, granularity: Granularity, groupBy: [GroupDefinition]? = nil, metrics: [String], nextPageToken: String? 
= nil, timePeriod: DateInterval) { + self.billingViewArn = billingViewArn self.filter = filter self.granularity = granularity self.groupBy = groupBy @@ -2176,6 +2179,9 @@ extension CostExplorer { } public func validate(name: String) throws { + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, max: 2048) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, min: 20) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[-a-zA-Z0-9/:_+=.-@]{1,43}$") try self.filter?.validate(name: "\(name).filter") try self.groupBy?.forEach { try $0.validate(name: "\(name).groupBy[]") @@ -2190,6 +2196,7 @@ extension CostExplorer { } private enum CodingKeys: String, CodingKey { + case billingViewArn = "BillingViewArn" case filter = "Filter" case granularity = "Granularity" case groupBy = "GroupBy" @@ -2226,6 +2233,8 @@ extension CostExplorer { } public struct GetCostAndUsageWithResourcesRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. + public let billingViewArn: String? /// Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression. The GetCostAndUsageWithResources operation requires that you either group by or filter by a ResourceId. It requires the Expression "SERVICE = Amazon Elastic Compute Cloud - Compute" in the filter. 
Valid values for MatchOptions for Dimensions are EQUALS and CASE_SENSITIVE. Valid values for MatchOptions for CostCategories and Tags are EQUALS, ABSENT, and CASE_SENSITIVE. Default values are EQUALS and CASE_SENSITIVE. public let filter: Expression /// Sets the Amazon Web Services cost granularity to MONTHLY, DAILY, or HOURLY. If Granularity isn't set, the response object doesn't include the Granularity, MONTHLY, DAILY, or HOURLY. @@ -2240,7 +2249,8 @@ extension CostExplorer { public let timePeriod: DateInterval @inlinable - public init(filter: Expression, granularity: Granularity, groupBy: [GroupDefinition]? = nil, metrics: [String]? = nil, nextPageToken: String? = nil, timePeriod: DateInterval) { + public init(billingViewArn: String? = nil, filter: Expression, granularity: Granularity, groupBy: [GroupDefinition]? = nil, metrics: [String]? = nil, nextPageToken: String? = nil, timePeriod: DateInterval) { + self.billingViewArn = billingViewArn self.filter = filter self.granularity = granularity self.groupBy = groupBy @@ -2250,6 +2260,9 @@ extension CostExplorer { } public func validate(name: String) throws { + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, max: 2048) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, min: 20) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[-a-zA-Z0-9/:_+=.-@]{1,43}$") try self.filter.validate(name: "\(name).filter") try self.groupBy?.forEach { try $0.validate(name: "\(name).groupBy[]") @@ -2264,6 +2277,7 @@ extension CostExplorer { } private enum CodingKeys: String, CodingKey { + case billingViewArn = "BillingViewArn" case filter = "Filter" case granularity = "Granularity" case groupBy = "GroupBy" @@ -2300,6 +2314,8 @@ extension CostExplorer { } public struct GetCostCategoriesRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) that uniquely identifies a specific billing 
view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API. + public let billingViewArn: String? public let costCategoryName: String? public let filter: Expression? /// This field is only used when the SortBy value is provided in the request. The maximum number of objects that are returned for this request. If MaxResults isn't specified with the SortBy value, the request returns 1000 results as the default value for this parameter. For GetCostCategories, MaxResults has an upper quota of 1000. @@ -2313,7 +2329,8 @@ extension CostExplorer { public let timePeriod: DateInterval @inlinable - public init(costCategoryName: String? = nil, filter: Expression? = nil, maxResults: Int? = nil, nextPageToken: String? = nil, searchString: String? = nil, sortBy: [SortDefinition]? = nil, timePeriod: DateInterval) { + public init(billingViewArn: String? = nil, costCategoryName: String? = nil, filter: Expression? = nil, maxResults: Int? = nil, nextPageToken: String? = nil, searchString: String? = nil, sortBy: [SortDefinition]? 
= nil, timePeriod: DateInterval) { + self.billingViewArn = billingViewArn self.costCategoryName = costCategoryName self.filter = filter self.maxResults = maxResults @@ -2324,6 +2341,9 @@ extension CostExplorer { } public func validate(name: String) throws { + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, max: 2048) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, min: 20) + try self.validate(self.billingViewArn, name: "billingViewArn", parent: name, pattern: "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[-a-zA-Z0-9/:_+=.-@]{1,43}$") try self.validate(self.costCategoryName, name: "costCategoryName", parent: name, max: 50) try self.validate(self.costCategoryName, name: "costCategoryName", parent: name, min: 1) try self.validate(self.costCategoryName, name: "costCategoryName", parent: name, pattern: "^(?! )[\\p{L}\\p{N}\\p{Z}-_]*(? CreateLocationNfsResponse { @@ -562,7 +608,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Creates a transfer location for a Network File System (NFS) file server. DataSync can use this location as a source or destination for transferring data. Before you begin, make sure that you understand how DataSync accesses NFS file servers. If you're copying data to or from an Snowcone device, you can also use CreateLocationNfs to create your transfer location. For more information, see Configuring transfers with Snowcone. + /// Creates a transfer location for a Network File System (NFS) file server. DataSync can use this location as a source or destination for transferring data. Before you begin, make sure that you understand how DataSync accesses NFS file servers. /// /// Parameters: /// - mountOptions: Specifies the options that DataSync can use to mount your NFS file server. 
@@ -2000,7 +2046,7 @@ public struct DataSync: AWSService { return try await self.updateDiscoveryJob(input, logger: logger) } - /// Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync. + /// Modifies the following configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with Azure Blob Storage. @Sendable @inlinable public func updateLocationAzureBlob(_ input: UpdateLocationAzureBlobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationAzureBlobResponse { @@ -2013,7 +2059,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync. + /// Modifies the following configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with Azure Blob Storage. /// /// Parameters: /// - accessTier: Specifies the access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers. @@ -2047,7 +2093,191 @@ public struct DataSync: AWSService { return try await self.updateLocationAzureBlob(input, logger: logger) } - /// Updates some parameters of a previously created location for a Hadoop Distributed File System cluster. + /// Modifies the following configuration parameters of the Amazon EFS transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with Amazon EFS. 
+ @Sendable + @inlinable + public func updateLocationEfs(_ input: UpdateLocationEfsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationEfsResponse { + try await self.client.execute( + operation: "UpdateLocationEfs", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon EFS transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with Amazon EFS. + /// + /// Parameters: + /// - accessPointArn: Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system. For more information, see Accessing restricted Amazon EFS file systems. + /// - fileSystemAccessRoleArn: Specifies an Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system. For information on creating this role, see Creating a DataSync IAM role for Amazon EFS file system access. + /// - inTransitEncryption: Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system. If you specify an access point using AccessPointArn or an IAM role using FileSystemAccessRoleArn, you must set this parameter to TLS1_2. + /// - locationArn: Specifies the Amazon Resource Name (ARN) of the Amazon EFS transfer location that you're updating. + /// - subdirectory: Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location). By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder). + /// - logger: Logger use during operation + @inlinable + public func updateLocationEfs( + accessPointArn: String? 
= nil, + fileSystemAccessRoleArn: String? = nil, + inTransitEncryption: EfsInTransitEncryption? = nil, + locationArn: String, + subdirectory: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationEfsResponse { + let input = UpdateLocationEfsRequest( + accessPointArn: accessPointArn, + fileSystemAccessRoleArn: fileSystemAccessRoleArn, + inTransitEncryption: inTransitEncryption, + locationArn: locationArn, + subdirectory: subdirectory + ) + return try await self.updateLocationEfs(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Amazon FSx for Lustre transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for Lustre. + @Sendable + @inlinable + public func updateLocationFsxLustre(_ input: UpdateLocationFsxLustreRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationFsxLustreResponse { + try await self.client.execute( + operation: "UpdateLocationFsxLustre", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon FSx for Lustre transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for Lustre. + /// + /// Parameters: + /// - locationArn: Specifies the Amazon Resource Name (ARN) of the FSx for Lustre transfer location that you're updating. + /// - subdirectory: Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories. When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory (/). 
+ /// - logger: Logger use during operation + @inlinable + public func updateLocationFsxLustre( + locationArn: String, + subdirectory: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationFsxLustreResponse { + let input = UpdateLocationFsxLustreRequest( + locationArn: locationArn, + subdirectory: subdirectory + ) + return try await self.updateLocationFsxLustre(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Amazon FSx for NetApp ONTAP transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for ONTAP. + @Sendable + @inlinable + public func updateLocationFsxOntap(_ input: UpdateLocationFsxOntapRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationFsxOntapResponse { + try await self.client.execute( + operation: "UpdateLocationFsxOntap", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon FSx for NetApp ONTAP transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for ONTAP. + /// + /// Parameters: + /// - locationArn: Specifies the Amazon Resource Name (ARN) of the FSx for ONTAP transfer location that you're updating. + /// - protocol: Specifies the data transfer protocol that DataSync uses to access your Amazon FSx file system. + /// - subdirectory: Specifies a path to the file share in the storage virtual machine (SVM) where you want to transfer data to or from. You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be /vol1, /vol1/tree1, or /share1. Don't specify a junction path in the SVM's root volume. 
For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide. + /// - logger: Logger use during operation + @inlinable + public func updateLocationFsxOntap( + locationArn: String, + protocol: FsxUpdateProtocol? = nil, + subdirectory: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationFsxOntapResponse { + let input = UpdateLocationFsxOntapRequest( + locationArn: locationArn, + protocol: `protocol`, + subdirectory: subdirectory + ) + return try await self.updateLocationFsxOntap(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Amazon FSx for OpenZFS transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for OpenZFS. Request parameters related to SMB aren't supported with the UpdateLocationFsxOpenZfs operation. + @Sendable + @inlinable + public func updateLocationFsxOpenZfs(_ input: UpdateLocationFsxOpenZfsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationFsxOpenZfsResponse { + try await self.client.execute( + operation: "UpdateLocationFsxOpenZfs", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon FSx for OpenZFS transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for OpenZFS. Request parameters related to SMB aren't supported with the UpdateLocationFsxOpenZfs operation. + /// + /// Parameters: + /// - locationArn: Specifies the Amazon Resource Name (ARN) of the FSx for OpenZFS transfer location that you're updating. + /// - protocol: + /// - subdirectory: Specifies a subdirectory in the location's path that must begin with /fsx. 
DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location). + /// - logger: Logger use during operation + @inlinable + public func updateLocationFsxOpenZfs( + locationArn: String, + protocol: FsxProtocol? = nil, + subdirectory: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationFsxOpenZfsResponse { + let input = UpdateLocationFsxOpenZfsRequest( + locationArn: locationArn, + protocol: `protocol`, + subdirectory: subdirectory + ) + return try await self.updateLocationFsxOpenZfs(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Amazon FSx for Windows File Server transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for Windows File Server. + @Sendable + @inlinable + public func updateLocationFsxWindows(_ input: UpdateLocationFsxWindowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationFsxWindowsResponse { + try await self.client.execute( + operation: "UpdateLocationFsxWindows", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon FSx for Windows File Server transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with FSx for Windows File Server. + /// + /// Parameters: + /// - domain: Specifies the name of the Windows domain that your FSx for Windows File Server file system belongs to. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system. + /// - locationArn: Specifies the ARN of the FSx for Windows File Server transfer location that you're updating. 
+ /// - password: Specifies the password of the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system. + /// - subdirectory: Specifies a mount path for your file system using forward slashes. DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location). + /// - user: Specifies the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system. For information about choosing a user with the right level of access for your transfer, see required permissions for FSx for Windows File Server locations. + /// - logger: Logger use during operation + @inlinable + public func updateLocationFsxWindows( + domain: String? = nil, + locationArn: String, + password: String? = nil, + subdirectory: String? = nil, + user: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationFsxWindowsResponse { + let input = UpdateLocationFsxWindowsRequest( + domain: domain, + locationArn: locationArn, + password: password, + subdirectory: subdirectory, + user: user + ) + return try await self.updateLocationFsxWindows(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Hadoop Distributed File System (HDFS) transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an HDFS cluster. @Sendable @inlinable public func updateLocationHdfs(_ input: UpdateLocationHdfsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationHdfsResponse { @@ -2060,7 +2290,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Updates some parameters of a previously created location for a Hadoop Distributed File System cluster. 
+ /// Modifies the following configuration parameters of the Hadoop Distributed File System (HDFS) transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an HDFS cluster. /// /// Parameters: /// - agentArns: The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster. @@ -2112,7 +2342,7 @@ public struct DataSync: AWSService { return try await self.updateLocationHdfs(input, logger: logger) } - /// Modifies some configurations of the Network File System (NFS) transfer location that you're using with DataSync. For more information, see Configuring transfers to or from an NFS file server. + /// Modifies the following configuration parameters of the Network File System (NFS) transfer location that you're using with DataSync. For more information, see Configuring transfers with an NFS file server. @Sendable @inlinable public func updateLocationNfs(_ input: UpdateLocationNfsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationNfsResponse { @@ -2125,7 +2355,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Modifies some configurations of the Network File System (NFS) transfer location that you're using with DataSync. For more information, see Configuring transfers to or from an NFS file server. + /// Modifies the following configuration parameters of the Network File System (NFS) transfer location that you're using with DataSync. For more information, see Configuring transfers with an NFS file server. /// /// Parameters: /// - locationArn: Specifies the Amazon Resource Name (ARN) of the NFS transfer location that you want to update. @@ -2150,7 +2380,7 @@ public struct DataSync: AWSService { return try await self.updateLocationNfs(input, logger: logger) } - /// Updates some parameters of an existing DataSync location for an object storage system. 
+ /// Modifies the following configuration parameters of the object storage transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an object storage system. @Sendable @inlinable public func updateLocationObjectStorage(_ input: UpdateLocationObjectStorageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationObjectStorageResponse { @@ -2163,7 +2393,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Updates some parameters of an existing DataSync location for an object storage system. + /// Modifies the following configuration parameters of the object storage transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an object storage system. /// /// Parameters: /// - accessKey: Specifies the access key (for example, a user name) if credentials are required to authenticate with the object storage server. @@ -2200,7 +2430,45 @@ public struct DataSync: AWSService { return try await self.updateLocationObjectStorage(input, logger: logger) } - /// Updates some of the parameters of a Server Message Block (SMB) file server location that you can use for DataSync transfers. + /// Modifies the following configuration parameters of the Amazon S3 transfer location that you're using with DataSync. Before you begin, make sure that you read the following topics: Storage class considerations with Amazon S3 locations Evaluating S3 request costs when using DataSync + @Sendable + @inlinable + public func updateLocationS3(_ input: UpdateLocationS3Request, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationS3Response { + try await self.client.execute( + operation: "UpdateLocationS3", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies the following configuration parameters of the Amazon S3 transfer location that you're using with DataSync. 
Before you begin, make sure that you read the following topics: Storage class considerations with Amazon S3 locations Evaluating S3 request costs when using DataSync + /// + /// Parameters: + /// - locationArn: Specifies the Amazon Resource Name (ARN) of the Amazon S3 transfer location that you're updating. + /// - s3Config: + /// - s3StorageClass: Specifies the storage class that you want your objects to use when Amazon S3 is a transfer destination. For buckets in Amazon Web Services Regions, the storage class defaults to STANDARD. For buckets on Outposts, the storage class defaults to OUTPOSTS. For more information, see Storage class considerations with Amazon S3 transfers. + /// - subdirectory: Specifies a prefix in the S3 bucket that DataSync reads from or writes to (depending on whether the bucket is a source or destination location). DataSync can't transfer objects with a prefix that begins with a slash (/) or includes //, /./, or /../ patterns. For example: /photos photos//2006/January photos/./2006/February photos/../2006/March + /// - logger: Logger use during operation + @inlinable + public func updateLocationS3( + locationArn: String, + s3Config: S3Config? = nil, + s3StorageClass: S3StorageClass? = nil, + subdirectory: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateLocationS3Response { + let input = UpdateLocationS3Request( + locationArn: locationArn, + s3Config: s3Config, + s3StorageClass: s3StorageClass, + subdirectory: subdirectory + ) + return try await self.updateLocationS3(input, logger: logger) + } + + /// Modifies the following configuration parameters of the Server Message Block (SMB) transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an SMB file server. 
@Sendable @inlinable public func updateLocationSmb(_ input: UpdateLocationSmbRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateLocationSmbResponse { @@ -2213,7 +2481,7 @@ public struct DataSync: AWSService { logger: logger ) } - /// Updates some of the parameters of a Server Message Block (SMB) file server location that you can use for DataSync transfers. + /// Modifies the following configuration parameters of the Server Message Block (SMB) transfer location that you're using with DataSync. For more information, see Configuring DataSync transfers with an SMB file server. /// /// Parameters: /// - agentArns: Specifies the DataSync agent (or agents) that can connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN). diff --git a/Sources/Soto/Services/DataSync/DataSync_shapes.swift b/Sources/Soto/Services/DataSync/DataSync_shapes.swift index 7ea63c27d8..223ec4f0f7 100644 --- a/Sources/Soto/Services/DataSync/DataSync_shapes.swift +++ b/Sources/Soto/Services/DataSync/DataSync_shapes.swift @@ -690,7 +690,7 @@ extension DataSync { public let fileSystemAccessRoleArn: String? /// Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system. If you specify an access point using AccessPointArn or an IAM role using FileSystemAccessRoleArn, you must set this parameter to TLS1_2. public let inTransitEncryption: EfsInTransitEncryption? - /// Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system. By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder). + /// Specifies a mount path for your Amazon EFS file system. 
This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location). By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder). public let subdirectory: String? /// Specifies the key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location. public let tags: [TagListEntry]? @@ -748,13 +748,13 @@ extension DataSync { } public struct CreateLocationFsxLustreRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) for the FSx for Lustre file system. + /// Specifies the Amazon Resource Name (ARN) of the FSx for Lustre file system. public let fsxFilesystemArn: String - /// The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for Lustre file system. + /// Specifies the Amazon Resource Names (ARNs) of up to five security groups that provide access to your FSx for Lustre file system. The security groups must be able to access the file system's ports. The file system must also allow access from the security groups. For information about file system access, see the Amazon FSx for Lustre User Guide . public let securityGroupArns: [String] - /// A subdirectory in the location's path. This subdirectory in the FSx for Lustre file system is used to read data from the FSx for Lustre source location or write data to the FSx for Lustre destination. + /// Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories. When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. 
If you don't include this parameter, DataSync uses the file system's root directory (/). public let subdirectory: String? - /// The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location. + /// Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location. public let tags: [TagListEntry]? @inlinable @@ -791,7 +791,7 @@ extension DataSync { } public struct CreateLocationFsxLustreResponse: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the FSx for Lustre file system location that's created. + /// The Amazon Resource Name (ARN) of the FSx for Lustre file system location that you created. public let locationArn: String? @inlinable @@ -810,7 +810,7 @@ extension DataSync { public let securityGroupArns: [String] /// Specifies the ARN of the storage virtual machine (SVM) in your file system where you want to copy data to or from. public let storageVirtualMachineArn: String - /// Specifies a path to the file share in the SVM where you'll copy your data. You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be /vol1, /vol1/tree1, or /share1. Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide. + /// Specifies a path to the file share in the SVM where you want to transfer data to or from. You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be /vol1, /vol1/tree1, or /share1. Don't specify a junction path in the SVM's root volume. 
For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide. public let subdirectory: String? /// Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location. public let tags: [TagListEntry]? @@ -928,7 +928,7 @@ extension DataSync { } public struct CreateLocationFsxWindowsRequest: AWSEncodableShape { - /// Specifies the name of the Microsoft Active Directory domain that the FSx for Windows File Server file system belongs to. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system. + /// Specifies the name of the Windows domain that the FSx for Windows File Server file system belongs to. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system. public let domain: String? /// Specifies the Amazon Resource Name (ARN) for the FSx for Windows File Server file system. public let fsxFilesystemArn: String @@ -2886,7 +2886,7 @@ extension DataSync { } public struct FsxProtocolSmb: AWSEncodableShape & AWSDecodableShape { - /// Specifies the fully qualified domain name (FQDN) of the Microsoft Active Directory that your storage virtual machine (SVM) belongs to. If you have multiple domains in your environment, configuring this setting makes sure that DataSync connects to the right SVM. + /// Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to. If you have multiple domains in your environment, configuring this setting makes sure that DataSync connects to the right SVM. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM. public let domain: String? public let mountOptions: SmbMountOptions? 
/// Specifies the password of a user who has permission to access your SVM. @@ -2919,6 +2919,61 @@ extension DataSync { } } + public struct FsxUpdateProtocol: AWSEncodableShape { + public let nfs: FsxProtocolNfs? + /// Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your FSx for ONTAP file system's storage virtual machine (SVM). + public let smb: FsxUpdateProtocolSmb? + + @inlinable + public init(nfs: FsxProtocolNfs? = nil, smb: FsxUpdateProtocolSmb? = nil) { + self.nfs = nfs + self.smb = smb + } + + public func validate(name: String) throws { + try self.smb?.validate(name: "\(name).smb") + } + + private enum CodingKeys: String, CodingKey { + case nfs = "NFS" + case smb = "SMB" + } + } + + public struct FsxUpdateProtocolSmb: AWSEncodableShape { + /// Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM. + public let domain: String? + public let mountOptions: SmbMountOptions? + /// Specifies the password of a user who has permission to access your SVM. + public let password: String? + /// Specifies a user that can mount and access the files, folders, and metadata in your SVM. For information about choosing a user with the right level of access for your transfer, see Using the SMB protocol. + public let user: String? + + @inlinable + public init(domain: String? = nil, mountOptions: SmbMountOptions? = nil, password: String? = nil, user: String? 
= nil) { + self.domain = domain + self.mountOptions = mountOptions + self.password = password + self.user = user + } + + public func validate(name: String) throws { + try self.validate(self.domain, name: "domain", parent: name, max: 253) + try self.validate(self.domain, name: "domain", parent: name, pattern: "^([A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252})?$") + try self.validate(self.password, name: "password", parent: name, max: 104) + try self.validate(self.password, name: "password", parent: name, pattern: "^.{0,104}$") + try self.validate(self.user, name: "user", parent: name, max: 104) + try self.validate(self.user, name: "user", parent: name, pattern: "^[^\\x5B\\x5D\\\\/:;|=,+*?]{1,104}$") + } + + private enum CodingKeys: String, CodingKey { + case domain = "Domain" + case mountOptions = "MountOptions" + case password = "Password" + case user = "User" + } + } + public struct GenerateRecommendationsRequest: AWSEncodableShape { /// Specifies the Amazon Resource Name (ARN) of the discovery job that collects information about your on-premises storage system. public let discoveryJobArn: String @@ -4814,6 +4869,194 @@ extension DataSync { public init() {} } + public struct UpdateLocationEfsRequest: AWSEncodableShape { + /// Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system. For more information, see Accessing restricted Amazon EFS file systems. + public let accessPointArn: String? + /// Specifies an Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system. For information on creating this role, see Creating a DataSync IAM role for Amazon EFS file system access. + public let fileSystemAccessRoleArn: String? + /// Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system. 
If you specify an access point using AccessPointArn or an IAM role using FileSystemAccessRoleArn, you must set this parameter to TLS1_2. + public let inTransitEncryption: EfsInTransitEncryption? + /// Specifies the Amazon Resource Name (ARN) of the Amazon EFS transfer location that you're updating. + public let locationArn: String + /// Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location). By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder). + public let subdirectory: String? + + @inlinable + public init(accessPointArn: String? = nil, fileSystemAccessRoleArn: String? = nil, inTransitEncryption: EfsInTransitEncryption? = nil, locationArn: String, subdirectory: String? = nil) { + self.accessPointArn = accessPointArn + self.fileSystemAccessRoleArn = fileSystemAccessRoleArn + self.inTransitEncryption = inTransitEncryption + self.locationArn = locationArn + self.subdirectory = subdirectory + } + + public func validate(name: String) throws { + try self.validate(self.accessPointArn, name: "accessPointArn", parent: name, max: 128) + try self.validate(self.accessPointArn, name: "accessPointArn", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):elasticfilesystem:[a-z\\-0-9]+:[0-9]{12}:access-point/fsap-[0-9a-f]{8,40}$)|(^$)$") + try self.validate(self.fileSystemAccessRoleArn, name: "fileSystemAccessRoleArn", parent: name, max: 2048) + try self.validate(self.fileSystemAccessRoleArn, name: "fileSystemAccessRoleArn", parent: name, pattern: "^(^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):iam::[0-9]{12}:role/.*$)|(^$)$") + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: 
"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 4096) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]*$") + } + + private enum CodingKeys: String, CodingKey { + case accessPointArn = "AccessPointArn" + case fileSystemAccessRoleArn = "FileSystemAccessRoleArn" + case inTransitEncryption = "InTransitEncryption" + case locationArn = "LocationArn" + case subdirectory = "Subdirectory" + } + } + + public struct UpdateLocationEfsResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateLocationFsxLustreRequest: AWSEncodableShape { + /// Specifies the Amazon Resource Name (ARN) of the FSx for Lustre transfer location that you're updating. + public let locationArn: String + /// Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories. When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory (/). + public let subdirectory: String? + + @inlinable + public init(locationArn: String, subdirectory: String? 
= nil) { + self.locationArn = locationArn + self.subdirectory = subdirectory + } + + public func validate(name: String) throws { + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 4096) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$") + } + + private enum CodingKeys: String, CodingKey { + case locationArn = "LocationArn" + case subdirectory = "Subdirectory" + } + } + + public struct UpdateLocationFsxLustreResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateLocationFsxOntapRequest: AWSEncodableShape { + /// Specifies the Amazon Resource Name (ARN) of the FSx for ONTAP transfer location that you're updating. + public let locationArn: String + /// Specifies the data transfer protocol that DataSync uses to access your Amazon FSx file system. + public let `protocol`: FsxUpdateProtocol? + /// Specifies a path to the file share in the storage virtual machine (SVM) where you want to transfer data to or from. You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be /vol1, /vol1/tree1, or /share1. Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide. + public let subdirectory: String? + + @inlinable + public init(locationArn: String, protocol: FsxUpdateProtocol? = nil, subdirectory: String? 
= nil) { + self.locationArn = locationArn + self.`protocol` = `protocol` + self.subdirectory = subdirectory + } + + public func validate(name: String) throws { + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.`protocol`?.validate(name: "\(name).`protocol`") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 255) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,255}$") + } + + private enum CodingKeys: String, CodingKey { + case locationArn = "LocationArn" + case `protocol` = "Protocol" + case subdirectory = "Subdirectory" + } + } + + public struct UpdateLocationFsxOntapResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateLocationFsxOpenZfsRequest: AWSEncodableShape { + /// Specifies the Amazon Resource Name (ARN) of the FSx for OpenZFS transfer location that you're updating. + public let locationArn: String + public let `protocol`: FsxProtocol? + /// Specifies a subdirectory in the location's path that must begin with /fsx. DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location). + public let subdirectory: String? + + @inlinable + public init(locationArn: String, protocol: FsxProtocol? = nil, subdirectory: String? 
= nil) { + self.locationArn = locationArn + self.`protocol` = `protocol` + self.subdirectory = subdirectory + } + + public func validate(name: String) throws { + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.`protocol`?.validate(name: "\(name).`protocol`") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 4096) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$") + } + + private enum CodingKeys: String, CodingKey { + case locationArn = "LocationArn" + case `protocol` = "Protocol" + case subdirectory = "Subdirectory" + } + } + + public struct UpdateLocationFsxOpenZfsResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateLocationFsxWindowsRequest: AWSEncodableShape { + /// Specifies the name of the Windows domain that your FSx for Windows File Server file system belongs to. If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system. + public let domain: String? + /// Specifies the ARN of the FSx for Windows File Server transfer location that you're updating. + public let locationArn: String + /// Specifies the password of the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system. + public let password: String? + /// Specifies a mount path for your file system using forward slashes. DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location). + public let subdirectory: String? 
+ /// Specifies the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system. For information about choosing a user with the right level of access for your transfer, see required permissions for FSx for Windows File Server locations. + public let user: String? + + @inlinable + public init(domain: String? = nil, locationArn: String, password: String? = nil, subdirectory: String? = nil, user: String? = nil) { + self.domain = domain + self.locationArn = locationArn + self.password = password + self.subdirectory = subdirectory + self.user = user + } + + public func validate(name: String) throws { + try self.validate(self.domain, name: "domain", parent: name, max: 253) + try self.validate(self.domain, name: "domain", parent: name, pattern: "^([A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252})?$") + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.validate(self.password, name: "password", parent: name, max: 104) + try self.validate(self.password, name: "password", parent: name, pattern: "^.{0,104}$") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 4096) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$") + try self.validate(self.user, name: "user", parent: name, max: 104) + try self.validate(self.user, name: "user", parent: name, pattern: "^[^\\x5B\\x5D\\\\/:;|=,+*?]{1,104}$") + } + + private enum CodingKeys: String, CodingKey { + case domain = "Domain" + case locationArn = "LocationArn" + case password = "Password" + case subdirectory = "Subdirectory" + case user = "User" + } + } + + public struct UpdateLocationFsxWindowsResponse: AWSDecodableShape { + public init() {} 
+ } + public struct UpdateLocationHdfsRequest: AWSEncodableShape { /// The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster. public let agentArns: [String]? @@ -5014,6 +5257,43 @@ extension DataSync { public init() {} } + public struct UpdateLocationS3Request: AWSEncodableShape { + /// Specifies the Amazon Resource Name (ARN) of the Amazon S3 transfer location that you're updating. + public let locationArn: String + public let s3Config: S3Config? + /// Specifies the storage class that you want your objects to use when Amazon S3 is a transfer destination. For buckets in Amazon Web Services Regions, the storage class defaults to STANDARD. For buckets on Outposts, the storage class defaults to OUTPOSTS. For more information, see Storage class considerations with Amazon S3 transfers. + public let s3StorageClass: S3StorageClass? + /// Specifies a prefix in the S3 bucket that DataSync reads from or writes to (depending on whether the bucket is a source or destination location). DataSync can't transfer objects with a prefix that begins with a slash (/) or includes //, /./, or /../ patterns. For example: /photos photos//2006/January photos/./2006/February photos/../2006/March + public let subdirectory: String? + + @inlinable + public init(locationArn: String, s3Config: S3Config? = nil, s3StorageClass: S3StorageClass? = nil, subdirectory: String? 
= nil) { + self.locationArn = locationArn + self.s3Config = s3Config + self.s3StorageClass = s3StorageClass + self.subdirectory = subdirectory + } + + public func validate(name: String) throws { + try self.validate(self.locationArn, name: "locationArn", parent: name, max: 128) + try self.validate(self.locationArn, name: "locationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:location/loc-[0-9a-z]{17}$") + try self.s3Config?.validate(name: "\(name).s3Config") + try self.validate(self.subdirectory, name: "subdirectory", parent: name, max: 4096) + try self.validate(self.subdirectory, name: "subdirectory", parent: name, pattern: "^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]*$") + } + + private enum CodingKeys: String, CodingKey { + case locationArn = "LocationArn" + case s3Config = "S3Config" + case s3StorageClass = "S3StorageClass" + case subdirectory = "Subdirectory" + } + } + + public struct UpdateLocationS3Response: AWSDecodableShape { + public init() {} + } + public struct UpdateLocationSmbRequest: AWSEncodableShape { /// Specifies the DataSync agent (or agents) that can connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN). public let agentArns: [String]? diff --git a/Sources/Soto/Services/DocDB/DocDB_api.swift b/Sources/Soto/Services/DocDB/DocDB_api.swift index 3d955e0eeb..347a682e4a 100644 --- a/Sources/Soto/Services/DocDB/DocDB_api.swift +++ b/Sources/Soto/Services/DocDB/DocDB_api.swift @@ -305,8 +305,10 @@ public struct DocDB: AWSService { /// - engineVersion: The version number of the database engine to use. The --engine-version will default to the latest major engine version. For production workloads, we recommend explicitly declaring this parameter with the intended major engine version. /// - globalClusterIdentifier: The cluster identifier of the new global cluster. /// - kmsKeyId: The KMS key identifier for an encrypted cluster. 
The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a cluster using the same Amazon Web Services account that owns the KMS encryption key that is used to encrypt the new cluster, you can use the KMS key alias instead of the ARN for the KMS encryption key. If an encryption key is not specified in KmsKeyId: If the StorageEncrypted parameter is true, Amazon DocumentDB uses your default encryption key. KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Regions. + /// - manageMasterUserPassword: Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. Constraint: You can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. /// - masterUsername: The name of the master user for the cluster. Constraints: Must be from 1 to 63 letters or numbers. The first character must be a letter. Cannot be a reserved word for the chosen database engine. /// - masterUserPassword: The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ("), or the "at" symbol (@). Constraints: Must contain from 8 to 100 characters. + /// - masterUserSecretKmsKeyId: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. 
If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. /// - port: The port number on which the instances in the cluster accept connections. /// - preferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes. /// - preferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. @@ -329,8 +331,10 @@ public struct DocDB: AWSService { engineVersion: String? = nil, globalClusterIdentifier: String? = nil, kmsKeyId: String? = nil, + manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, + masterUserSecretKmsKeyId: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? 
= nil, @@ -353,8 +357,10 @@ public struct DocDB: AWSService { engineVersion: engineVersion, globalClusterIdentifier: globalClusterIdentifier, kmsKeyId: kmsKeyId, + manageMasterUserPassword: manageMasterUserPassword, masterUsername: masterUsername, masterUserPassword: masterUserPassword, + masterUserSecretKmsKeyId: masterUserSecretKmsKeyId, port: port, preferredBackupWindow: preferredBackupWindow, preferredMaintenanceWindow: preferredMaintenanceWindow, @@ -1622,11 +1628,14 @@ public struct DocDB: AWSService { /// - dbClusterParameterGroupName: The name of the cluster parameter group to use for the cluster. /// - deletionProtection: Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted. /// - engineVersion: The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled. To list all of the available engine versions for Amazon DocumentDB use the following command: aws docdb describe-db-engine-versions --engine docdb --query "DBEngineVersions[].EngineVersion" + /// - manageMasterUserPassword: Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. If the cluster doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. In this case, you can't specify MasterUserPassword. If the cluster already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, Amazon DocumentDB deletes the secret and uses the new password for the master user specified by MasterUserPassword. 
/// - masterUserPassword: The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ("), or the "at" symbol (@). Constraints: Must contain from 8 to 100 characters. + /// - masterUserSecretKmsKeyId: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if both of the following conditions are met: The cluster doesn't manage the master user password in Amazon Web Services Secrets Manager. If the cluster already manages the master user password in Amazon Web Services Secrets Manager, you can't change the KMS key that is used to encrypt the secret. You are enabling ManageMasterUserPassword to manage the master user password in Amazon Web Services Secrets Manager. If you are turning on ManageMasterUserPassword and don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. /// - newDBClusterIdentifier: The new cluster identifier for the cluster when renaming a cluster. This value is stored as a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. The first character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. Example: my-cluster2 /// - port: The port number on which the cluster accepts connections. 
Constraints: Must be a value from 1150 to 65535. Default: The same port as the original cluster. /// - preferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes. /// - preferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. + /// - rotateMasterUserPassword: Specifies whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password. This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the cluster. The secret value contains the updated password. Constraint: You must apply the change immediately when rotating the master user password. /// - storageType: The storage type to associate with the DB cluster. For information on storage types for Amazon DocumentDB clusters, see Cluster storage configurations in the Amazon DocumentDB Developer Guide. Valid values for storage type - standard | iopt1 Default value is standard /// - vpcSecurityGroupIds: A list of virtual private cloud (VPC) security groups that the cluster will belong to. /// - logger: Logger use during operation @@ -1640,11 +1649,14 @@ public struct DocDB: AWSService { dbClusterParameterGroupName: String? = nil, deletionProtection: Bool? 
= nil, engineVersion: String? = nil, + manageMasterUserPassword: Bool? = nil, masterUserPassword: String? = nil, + masterUserSecretKmsKeyId: String? = nil, newDBClusterIdentifier: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, + rotateMasterUserPassword: Bool? = nil, storageType: String? = nil, vpcSecurityGroupIds: [String]? = nil, logger: Logger = AWSClient.loggingDisabled @@ -1658,11 +1670,14 @@ public struct DocDB: AWSService { dbClusterParameterGroupName: dbClusterParameterGroupName, deletionProtection: deletionProtection, engineVersion: engineVersion, + manageMasterUserPassword: manageMasterUserPassword, masterUserPassword: masterUserPassword, + masterUserSecretKmsKeyId: masterUserSecretKmsKeyId, newDBClusterIdentifier: newDBClusterIdentifier, port: port, preferredBackupWindow: preferredBackupWindow, preferredMaintenanceWindow: preferredMaintenanceWindow, + rotateMasterUserPassword: rotateMasterUserPassword, storageType: storageType, vpcSecurityGroupIds: vpcSecurityGroupIds ) diff --git a/Sources/Soto/Services/DocDB/DocDB_shapes.swift b/Sources/Soto/Services/DocDB/DocDB_shapes.swift index 0640dd8123..b872907a3a 100644 --- a/Sources/Soto/Services/DocDB/DocDB_shapes.swift +++ b/Sources/Soto/Services/DocDB/DocDB_shapes.swift @@ -238,6 +238,28 @@ extension DocDB { } } + public struct ClusterMasterUserSecret: AWSDecodableShape { + /// The Amazon Web Services KMS key identifier that is used to encrypt the secret. + public let kmsKeyId: String? + /// The Amazon Resource Name (ARN) of the secret. + public let secretArn: String? + /// The status of the secret. The possible status values include the following: creating - The secret is being created. active - The secret is available for normal use and rotation. rotating - The secret is being rotated. impaired - The secret can be used to access database credentials, but it can't be rotated. 
A secret might have this status if, for example, permissions are changed so that Amazon DocumentDB can no longer access either the secret or the KMS key for the secret. When a secret has this status, you can correct the condition that caused the status. Alternatively, modify the instance to turn off automatic management of database credentials, and then modify the instance again to turn on automatic management of database credentials. + public let secretStatus: String? + + @inlinable + public init(kmsKeyId: String? = nil, secretArn: String? = nil, secretStatus: String? = nil) { + self.kmsKeyId = kmsKeyId + self.secretArn = secretArn + self.secretStatus = secretStatus + } + + private enum CodingKeys: String, CodingKey { + case kmsKeyId = "KmsKeyId" + case secretArn = "SecretArn" + case secretStatus = "SecretStatus" + } + } + public struct CopyDBClusterParameterGroupMessage: AWSEncodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } @@ -359,10 +381,14 @@ extension DocDB { public let globalClusterIdentifier: String? /// The KMS key identifier for an encrypted cluster. The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a cluster using the same Amazon Web Services account that owns the KMS encryption key that is used to encrypt the new cluster, you can use the KMS key alias instead of the ARN for the KMS encryption key. If an encryption key is not specified in KmsKeyId: If the StorageEncrypted parameter is true, Amazon DocumentDB uses your default encryption key. KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Regions. public let kmsKeyId: String? + /// Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. 
Constraint: You can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. + public let manageMasterUserPassword: Bool? /// The name of the master user for the cluster. Constraints: Must be from 1 to 63 letters or numbers. The first character must be a letter. Cannot be a reserved word for the chosen database engine. public let masterUsername: String? /// The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ("), or the "at" symbol (@). Constraints: Must contain from 8 to 100 characters. public let masterUserPassword: String? + /// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. + public let masterUserSecretKmsKeyId: String? /// The port number on which the instances in the cluster accept connections. public let port: Int? /// The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. 
The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes. @@ -383,7 +409,7 @@ extension DocDB { public var vpcSecurityGroupIds: [String]? @inlinable - public init(availabilityZones: [String]? = nil, backupRetentionPeriod: Int? = nil, dbClusterIdentifier: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, enableCloudwatchLogsExports: [String]? = nil, engine: String? = nil, engineVersion: String? = nil, globalClusterIdentifier: String? = nil, kmsKeyId: String? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, preSignedUrl: String? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(availabilityZones: [String]? = nil, backupRetentionPeriod: Int? = nil, dbClusterIdentifier: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, enableCloudwatchLogsExports: [String]? = nil, engine: String? = nil, engineVersion: String? = nil, globalClusterIdentifier: String? = nil, kmsKeyId: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, preSignedUrl: String? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? 
= nil) { self.availabilityZones = availabilityZones self.backupRetentionPeriod = backupRetentionPeriod self.dbClusterIdentifier = dbClusterIdentifier @@ -395,8 +421,10 @@ extension DocDB { self.engineVersion = engineVersion self.globalClusterIdentifier = globalClusterIdentifier self.kmsKeyId = kmsKeyId + self.manageMasterUserPassword = manageMasterUserPassword self.masterUsername = masterUsername self.masterUserPassword = masterUserPassword + self.masterUserSecretKmsKeyId = masterUserSecretKmsKeyId self.port = port self.preferredBackupWindow = preferredBackupWindow self.preferredMaintenanceWindow = preferredMaintenanceWindow @@ -425,8 +453,10 @@ extension DocDB { case engineVersion = "EngineVersion" case globalClusterIdentifier = "GlobalClusterIdentifier" case kmsKeyId = "KmsKeyId" + case manageMasterUserPassword = "ManageMasterUserPassword" case masterUsername = "MasterUsername" case masterUserPassword = "MasterUserPassword" + case masterUserSecretKmsKeyId = "MasterUserSecretKmsKeyId" case port = "Port" case preferredBackupWindow = "PreferredBackupWindow" case preferredMaintenanceWindow = "PreferredMaintenanceWindow" @@ -821,6 +851,8 @@ extension DocDB { public let latestRestorableTime: Date? /// Contains the master user name for the cluster. public let masterUsername: String? + /// The secret managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the master user password. + public let masterUserSecret: ClusterMasterUserSecret? /// Specifies whether the cluster has instances in multiple Availability Zones. public let multiAZ: Bool? /// Specifies the progress of the operation as a percentage. @@ -849,7 +881,7 @@ extension DocDB { public var vpcSecurityGroups: [VpcSecurityGroupMembership]? @inlinable - public init(associatedRoles: [DBClusterRole]? = nil, availabilityZones: [String]? = nil, backupRetentionPeriod: Int? = nil, cloneGroupId: String? = nil, clusterCreateTime: Date? = nil, dbClusterArn: String? = nil, dbClusterIdentifier: String? 
= nil, dbClusterMembers: [DBClusterMember]? = nil, dbClusterParameterGroup: String? = nil, dbClusterResourceId: String? = nil, dbSubnetGroup: String? = nil, deletionProtection: Bool? = nil, earliestRestorableTime: Date? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: String? = nil, engine: String? = nil, engineVersion: String? = nil, hostedZoneId: String? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, masterUsername: String? = nil, multiAZ: Bool? = nil, percentProgress: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, readerEndpoint: String? = nil, readReplicaIdentifiers: [String]? = nil, replicationSourceIdentifier: String? = nil, status: String? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { + public init(associatedRoles: [DBClusterRole]? = nil, availabilityZones: [String]? = nil, backupRetentionPeriod: Int? = nil, cloneGroupId: String? = nil, clusterCreateTime: Date? = nil, dbClusterArn: String? = nil, dbClusterIdentifier: String? = nil, dbClusterMembers: [DBClusterMember]? = nil, dbClusterParameterGroup: String? = nil, dbClusterResourceId: String? = nil, dbSubnetGroup: String? = nil, deletionProtection: Bool? = nil, earliestRestorableTime: Date? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: String? = nil, engine: String? = nil, engineVersion: String? = nil, hostedZoneId: String? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, masterUsername: String? = nil, masterUserSecret: ClusterMasterUserSecret? = nil, multiAZ: Bool? = nil, percentProgress: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, readerEndpoint: String? = nil, readReplicaIdentifiers: [String]? = nil, replicationSourceIdentifier: String? = nil, status: String? = nil, storageEncrypted: Bool? = nil, storageType: String? 
= nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { self.associatedRoles = associatedRoles self.availabilityZones = availabilityZones self.backupRetentionPeriod = backupRetentionPeriod @@ -871,6 +903,7 @@ extension DocDB { self.kmsKeyId = kmsKeyId self.latestRestorableTime = latestRestorableTime self.masterUsername = masterUsername + self.masterUserSecret = masterUserSecret self.multiAZ = multiAZ self.percentProgress = percentProgress self.port = port @@ -907,6 +940,7 @@ extension DocDB { case kmsKeyId = "KmsKeyId" case latestRestorableTime = "LatestRestorableTime" case masterUsername = "MasterUsername" + case masterUserSecret = "MasterUserSecret" case multiAZ = "MultiAZ" case percentProgress = "PercentProgress" case port = "Port" @@ -2716,8 +2750,12 @@ extension DocDB { public let deletionProtection: Bool? /// The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled. To list all of the available engine versions for Amazon DocumentDB use the following command: aws docdb describe-db-engine-versions --engine docdb --query "DBEngineVersions[].EngineVersion" public let engineVersion: String? + /// Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. If the cluster doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. In this case, you can't specify MasterUserPassword. If the cluster already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. In this case, Amazon DocumentDB deletes the secret and uses the new password for the master user specified by MasterUserPassword. + public let manageMasterUserPassword: Bool? 
/// The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ("), or the "at" symbol (@). Constraints: Must contain from 8 to 100 characters. public let masterUserPassword: String? + /// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if both of the following conditions are met: The cluster doesn't manage the master user password in Amazon Web Services Secrets Manager. If the cluster already manages the master user password in Amazon Web Services Secrets Manager, you can't change the KMS key that is used to encrypt the secret. You are enabling ManageMasterUserPassword to manage the master user password in Amazon Web Services Secrets Manager. If you are turning on ManageMasterUserPassword and don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. + public let masterUserSecretKmsKeyId: String? /// The new cluster identifier for the cluster when renaming a cluster. This value is stored as a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. The first character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. Example: my-cluster2 public let newDBClusterIdentifier: String? 
/// The port number on which the cluster accepts connections. Constraints: Must be a value from 1150 to 65535. Default: The same port as the original cluster. @@ -2726,6 +2764,8 @@ extension DocDB { public let preferredBackupWindow: String? /// The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. public let preferredMaintenanceWindow: String? + /// Specifies whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password. This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the cluster. The secret value contains the updated password. Constraint: You must apply the change immediately when rotating the master user password. + public let rotateMasterUserPassword: Bool? /// The storage type to associate with the DB cluster. For information on storage types for Amazon DocumentDB clusters, see Cluster storage configurations in the Amazon DocumentDB Developer Guide. Valid values for storage type - standard | iopt1 Default value is standard public let storageType: String? /// A list of virtual private cloud (VPC) security groups that the cluster will belong to. @@ -2733,7 +2773,7 @@ extension DocDB { public var vpcSecurityGroupIds: [String]? @inlinable - public init(allowMajorVersionUpgrade: Bool? = nil, applyImmediately: Bool? = nil, backupRetentionPeriod: Int? = nil, cloudwatchLogsExportConfiguration: CloudwatchLogsExportConfiguration? = nil, dbClusterIdentifier: String? = nil, dbClusterParameterGroupName: String? = nil, deletionProtection: Bool? = nil, engineVersion: String? = nil, masterUserPassword: String? 
= nil, newDBClusterIdentifier: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, storageType: String? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allowMajorVersionUpgrade: Bool? = nil, applyImmediately: Bool? = nil, backupRetentionPeriod: Int? = nil, cloudwatchLogsExportConfiguration: CloudwatchLogsExportConfiguration? = nil, dbClusterIdentifier: String? = nil, dbClusterParameterGroupName: String? = nil, deletionProtection: Bool? = nil, engineVersion: String? = nil, manageMasterUserPassword: Bool? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, newDBClusterIdentifier: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, rotateMasterUserPassword: Bool? = nil, storageType: String? = nil, vpcSecurityGroupIds: [String]? = nil) { self.allowMajorVersionUpgrade = allowMajorVersionUpgrade self.applyImmediately = applyImmediately self.backupRetentionPeriod = backupRetentionPeriod @@ -2742,11 +2782,14 @@ extension DocDB { self.dbClusterParameterGroupName = dbClusterParameterGroupName self.deletionProtection = deletionProtection self.engineVersion = engineVersion + self.manageMasterUserPassword = manageMasterUserPassword self.masterUserPassword = masterUserPassword + self.masterUserSecretKmsKeyId = masterUserSecretKmsKeyId self.newDBClusterIdentifier = newDBClusterIdentifier self.port = port self.preferredBackupWindow = preferredBackupWindow self.preferredMaintenanceWindow = preferredMaintenanceWindow + self.rotateMasterUserPassword = rotateMasterUserPassword self.storageType = storageType self.vpcSecurityGroupIds = vpcSecurityGroupIds } @@ -2760,11 +2803,14 @@ extension DocDB { case dbClusterParameterGroupName = "DBClusterParameterGroupName" case deletionProtection = "DeletionProtection" case engineVersion = "EngineVersion" + case manageMasterUserPassword = "ManageMasterUserPassword" case 
masterUserPassword = "MasterUserPassword" + case masterUserSecretKmsKeyId = "MasterUserSecretKmsKeyId" case newDBClusterIdentifier = "NewDBClusterIdentifier" case port = "Port" case preferredBackupWindow = "PreferredBackupWindow" case preferredMaintenanceWindow = "PreferredMaintenanceWindow" + case rotateMasterUserPassword = "RotateMasterUserPassword" case storageType = "StorageType" case vpcSecurityGroupIds = "VpcSecurityGroupIds" } diff --git a/Sources/Soto/Services/ECR/ECR_api.swift b/Sources/Soto/Services/ECR/ECR_api.swift index f03150ceed..fa03553d4a 100644 --- a/Sources/Soto/Services/ECR/ECR_api.swift +++ b/Sources/Soto/Services/ECR/ECR_api.swift @@ -123,33 +123,51 @@ public struct ECR: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "api.ecr.af-south-1.api.aws", + "ap-east-1": "api.ecr.ap-east-1.api.aws", + "ap-northeast-1": "api.ecr.ap-northeast-1.api.aws", + "ap-northeast-2": "api.ecr.ap-northeast-2.api.aws", + "ap-northeast-3": "api.ecr.ap-northeast-3.api.aws", + "ap-south-1": "api.ecr.ap-south-1.api.aws", + "ap-south-2": "api.ecr.ap-south-2.api.aws", + "ap-southeast-1": "api.ecr.ap-southeast-1.api.aws", + "ap-southeast-2": "api.ecr.ap-southeast-2.api.aws", + "ap-southeast-3": "api.ecr.ap-southeast-3.api.aws", + "ap-southeast-4": "api.ecr.ap-southeast-4.api.aws", + "ap-southeast-5": "api.ecr.ap-southeast-5.api.aws", + "ca-central-1": "api.ecr.ca-central-1.api.aws", + "ca-west-1": "api.ecr.ca-west-1.api.aws", + "cn-north-1": "api.ecr.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "api.ecr.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "api.ecr.eu-central-1.api.aws", + "eu-central-2": "api.ecr.eu-central-2.api.aws", + "eu-north-1": "api.ecr.eu-north-1.api.aws", + "eu-south-1": "api.ecr.eu-south-1.api.aws", + "eu-south-2": "api.ecr.eu-south-2.api.aws", + "eu-west-1": 
"api.ecr.eu-west-1.api.aws", + "eu-west-2": "api.ecr.eu-west-2.api.aws", + "eu-west-3": "api.ecr.eu-west-3.api.aws", + "il-central-1": "api.ecr.il-central-1.api.aws", + "me-central-1": "api.ecr.me-central-1.api.aws", + "me-south-1": "api.ecr.me-south-1.api.aws", + "sa-east-1": "api.ecr.sa-east-1.api.aws", + "us-east-1": "api.ecr.us-east-1.api.aws", + "us-east-2": "api.ecr.us-east-2.api.aws", + "us-gov-east-1": "api.ecr.us-gov-east-1.api.aws", + "us-gov-west-1": "api.ecr.us-gov-west-1.api.aws", + "us-west-1": "api.ecr.us-west-1.api.aws", + "us-west-2": "api.ecr.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "us-east-1": "api.ecr-fips.us-east-1.api.aws", + "us-east-2": "api.ecr-fips.us-east-2.api.aws", + "us-gov-east-1": "api.ecr-fips.us-gov-east-1.api.aws", + "us-gov-west-1": "api.ecr-fips.us-gov-west-1.api.aws", + "us-west-1": "api.ecr-fips.us-west-1.api.aws", + "us-west-2": "api.ecr-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ - "af-south-1": "ecr-fips.af-south-1.amazonaws.com", - "ap-east-1": "ecr-fips.ap-east-1.amazonaws.com", - "ap-northeast-1": "ecr-fips.ap-northeast-1.amazonaws.com", - "ap-northeast-2": "ecr-fips.ap-northeast-2.amazonaws.com", - "ap-northeast-3": "ecr-fips.ap-northeast-3.amazonaws.com", - "ap-south-1": "ecr-fips.ap-south-1.amazonaws.com", - "ap-south-2": "ecr-fips.ap-south-2.amazonaws.com", - "ap-southeast-1": "ecr-fips.ap-southeast-1.amazonaws.com", - "ap-southeast-2": "ecr-fips.ap-southeast-2.amazonaws.com", - "ap-southeast-3": "ecr-fips.ap-southeast-3.amazonaws.com", - "ap-southeast-4": "ecr-fips.ap-southeast-4.amazonaws.com", - "ap-southeast-5": "ecr-fips.ap-southeast-5.amazonaws.com", - "ca-central-1": "ecr-fips.ca-central-1.amazonaws.com", - "ca-west-1": "ecr-fips.ca-west-1.amazonaws.com", - "eu-central-1": "ecr-fips.eu-central-1.amazonaws.com", - "eu-central-2": "ecr-fips.eu-central-2.amazonaws.com", - "eu-north-1": "ecr-fips.eu-north-1.amazonaws.com", - "eu-south-1": 
"ecr-fips.eu-south-1.amazonaws.com", - "eu-south-2": "ecr-fips.eu-south-2.amazonaws.com", - "eu-west-1": "ecr-fips.eu-west-1.amazonaws.com", - "eu-west-2": "ecr-fips.eu-west-2.amazonaws.com", - "eu-west-3": "ecr-fips.eu-west-3.amazonaws.com", - "il-central-1": "ecr-fips.il-central-1.amazonaws.com", - "me-central-1": "ecr-fips.me-central-1.amazonaws.com", - "me-south-1": "ecr-fips.me-south-1.amazonaws.com", - "sa-east-1": "ecr-fips.sa-east-1.amazonaws.com", "us-east-1": "ecr-fips.us-east-1.amazonaws.com", "us-east-2": "ecr-fips.us-east-2.amazonaws.com", "us-gov-east-1": "ecr-fips.us-gov-east-1.amazonaws.com", @@ -917,7 +935,7 @@ public struct ECR: AWSService { return try await self.describeRepositoryCreationTemplates(input, logger: logger) } - /// Retrieves the basic scan type version name. + /// Retrieves the account setting value for the specified setting name. @Sendable @inlinable public func getAccountSetting(_ input: GetAccountSettingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAccountSettingResponse { @@ -930,10 +948,10 @@ public struct ECR: AWSService { logger: logger ) } - /// Retrieves the basic scan type version name. + /// Retrieves the account setting value for the specified setting name. /// /// Parameters: - /// - name: Basic scan type version name. + /// - name: The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or REGISTRY_POLICY_SCOPE. /// - logger: Logger use during operation @inlinable public func getAccountSetting( @@ -1269,7 +1287,7 @@ public struct ECR: AWSService { return try await self.listTagsForResource(input, logger: logger) } - /// Allows you to change the basic scan type version by setting the name parameter to either CLAIR to AWS_NATIVE. + /// Allows you to change the basic scan type version or registry policy scope. 
@Sendable @inlinable public func putAccountSetting(_ input: PutAccountSettingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutAccountSettingResponse { @@ -1282,11 +1300,11 @@ public struct ECR: AWSService { logger: logger ) } - /// Allows you to change the basic scan type version by setting the name parameter to either CLAIR to AWS_NATIVE. + /// Allows you to change the basic scan type version or registry policy scope. /// /// Parameters: - /// - name: Basic scan type version name. - /// - value: Setting value that determines what basic scan type is being used: AWS_NATIVE or CLAIR. + /// - name: The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or REGISTRY_POLICY_SCOPE. + /// - value: Setting value that is specified. The following are valid values for the basic scan type being used: AWS_NATIVE or CLAIR. The following are valid values for the registry policy scope being used: V1 or V2. /// - logger: Logger use during operation @inlinable public func putAccountSetting( diff --git a/Sources/Soto/Services/ECR/ECR_shapes.swift b/Sources/Soto/Services/ECR/ECR_shapes.swift index 642b767edc..0b369e7e58 100644 --- a/Sources/Soto/Services/ECR/ECR_shapes.swift +++ b/Sources/Soto/Services/ECR/ECR_shapes.swift @@ -1564,7 +1564,7 @@ extension ECR { } public struct GetAccountSettingRequest: AWSEncodableShape { - /// Basic scan type version name. + /// The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or REGISTRY_POLICY_SCOPE. public let name: String @inlinable @@ -1583,9 +1583,9 @@ extension ECR { } public struct GetAccountSettingResponse: AWSDecodableShape { - /// Retrieves the basic scan type version name. + /// Retrieves the name of the account setting. public let name: String? - /// Retrieves the value that specifies what basic scan type is being used: AWS_NATIVE or CLAIR. + /// The setting value for the setting name. The following are valid values for the basic scan type being used: AWS_NATIVE or CLAIR. 
The following are valid values for the registry policy scope being used: V1 or V2. public let value: String? @inlinable @@ -2538,9 +2538,9 @@ extension ECR { } public struct PutAccountSettingRequest: AWSEncodableShape { - /// Basic scan type version name. + /// The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or REGISTRY_POLICY_SCOPE. public let name: String - /// Setting value that determines what basic scan type is being used: AWS_NATIVE or CLAIR. + /// Setting value that is specified. The following are valid values for the basic scan type being used: AWS_NATIVE or CLAIR. The following are valid values for the registry policy scope being used: V1 or V2. public let value: String @inlinable @@ -2561,9 +2561,9 @@ extension ECR { } public struct PutAccountSettingResponse: AWSDecodableShape { - /// Retrieves the the basic scan type version name. + /// Retrieves the name of the account setting. public let name: String? - /// Retrieves the basic scan type value, either AWS_NATIVE or -. + /// Retrieves the value of the specified account setting. public let value: String? 
@inlinable diff --git a/Sources/Soto/Services/ECRPublic/ECRPublic_api.swift b/Sources/Soto/Services/ECRPublic/ECRPublic_api.swift index d07f3ae6d0..621db768ea 100644 --- a/Sources/Soto/Services/ECRPublic/ECRPublic_api.swift +++ b/Sources/Soto/Services/ECRPublic/ECRPublic_api.swift @@ -68,6 +68,7 @@ public struct ECRPublic: AWSService { apiVersion: "2020-10-30", endpoint: endpoint, serviceEndpoints: Self.serviceEndpoints, + variantEndpoints: Self.variantEndpoints, errorType: ECRPublicErrorType.self, xmlNamespace: "http://ecr-public.amazonaws.com/doc/2020-12-02/", middleware: middleware, @@ -85,6 +86,12 @@ public struct ECRPublic: AWSService { ]} + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "us-east-1": "api.ecr-public.us-east-1.api.aws" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/EKS/EKS_api.swift b/Sources/Soto/Services/EKS/EKS_api.swift index d1a97599c4..05d7bedcf3 100644 --- a/Sources/Soto/Services/EKS/EKS_api.swift +++ b/Sources/Soto/Services/EKS/EKS_api.swift @@ -1048,6 +1048,53 @@ public struct EKS: AWSService { return try await self.describeCluster(input, logger: logger) } + /// Lists available Kubernetes versions for Amazon EKS clusters. + @Sendable + @inlinable + public func describeClusterVersions(_ input: DescribeClusterVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeClusterVersionsResponse { + try await self.client.execute( + operation: "DescribeClusterVersions", + path: "/cluster-versions", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists available Kubernetes versions for Amazon EKS clusters. + /// + /// Parameters: + /// - clusterType: The type of cluster to filter versions by. + /// - clusterVersions: List of specific cluster versions to describe. + /// - defaultOnly: Filter to show only default versions. 
+ /// - includeAll: Include all available versions in the response. + /// - maxResults: Maximum number of results to return. + /// - nextToken: Pagination token for the next set of results. + /// - status: Filter versions by their current status. + /// - logger: Logger use during operation + @inlinable + public func describeClusterVersions( + clusterType: String? = nil, + clusterVersions: [String]? = nil, + defaultOnly: Bool? = nil, + includeAll: Bool? = nil, + maxResults: Int? = nil, + nextToken: String? = nil, + status: ClusterVersionStatus? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DescribeClusterVersionsResponse { + let input = DescribeClusterVersionsRequest( + clusterType: clusterType, + clusterVersions: clusterVersions, + defaultOnly: defaultOnly, + includeAll: includeAll, + maxResults: maxResults, + nextToken: nextToken, + status: status + ) + return try await self.describeClusterVersions(input, logger: logger) + } + /// Returns descriptive information about a subscription. @Sendable @inlinable @@ -2329,6 +2376,55 @@ extension EKS { return self.describeAddonVersionsPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``describeClusterVersions(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func describeClusterVersionsPaginator( + _ input: DescribeClusterVersionsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.describeClusterVersions, + inputKey: \DescribeClusterVersionsRequest.nextToken, + outputKey: \DescribeClusterVersionsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``describeClusterVersions(_:logger:)``. + /// + /// - Parameters: + /// - clusterType: The type of cluster to filter versions by. + /// - clusterVersions: List of specific cluster versions to describe. 
+ /// - defaultOnly: Filter to show only default versions. + /// - includeAll: Include all available versions in the response. + /// - maxResults: Maximum number of results to return. + /// - status: Filter versions by their current status. + /// - logger: Logger used for logging + @inlinable + public func describeClusterVersionsPaginator( + clusterType: String? = nil, + clusterVersions: [String]? = nil, + defaultOnly: Bool? = nil, + includeAll: Bool? = nil, + maxResults: Int? = nil, + status: ClusterVersionStatus? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = DescribeClusterVersionsRequest( + clusterType: clusterType, + clusterVersions: clusterVersions, + defaultOnly: defaultOnly, + includeAll: includeAll, + maxResults: maxResults, + status: status + ) + return self.describeClusterVersionsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listAccessEntries(_:logger:)``. /// /// - Parameters: @@ -2807,6 +2903,21 @@ extension EKS.DescribeAddonVersionsRequest: AWSPaginateToken { } } +extension EKS.DescribeClusterVersionsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> EKS.DescribeClusterVersionsRequest { + return .init( + clusterType: self.clusterType, + clusterVersions: self.clusterVersions, + defaultOnly: self.defaultOnly, + includeAll: self.includeAll, + maxResults: self.maxResults, + nextToken: token, + status: self.status + ) + } +} + extension EKS.ListAccessEntriesRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> EKS.ListAccessEntriesRequest { diff --git a/Sources/Soto/Services/EKS/EKS_shapes.swift b/Sources/Soto/Services/EKS/EKS_shapes.swift index 1d59f812de..57d8193a21 100644 --- a/Sources/Soto/Services/EKS/EKS_shapes.swift +++ b/Sources/Soto/Services/EKS/EKS_shapes.swift @@ -130,6 +130,13 @@ extension EKS { public var description: String { return self.rawValue } } + public enum 
ClusterVersionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case extendedSupport = "extended-support" + case standardSupport = "standard-support" + case unsupported = "unsupported" + public var description: String { return self.rawValue } + } + public enum ConfigStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "ACTIVE" case creating = "CREATING" @@ -537,6 +544,24 @@ extension EKS { } } + public struct AddonCompatibilityDetail: AWSDecodableShape { + /// A list of compatible add-on versions. + public let compatibleVersions: [String]? + /// The name of the Amazon EKS add-on. + public let name: String? + + @inlinable + public init(compatibleVersions: [String]? = nil, name: String? = nil) { + self.compatibleVersions = compatibleVersions + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case compatibleVersions = "compatibleVersions" + case name = "name" + } + } + public struct AddonHealth: AWSDecodableShape { /// An object representing the health issues for an add-on. public let issues: [AddonIssue]? @@ -1083,6 +1108,52 @@ extension EKS { } } + public struct ClusterVersionInformation: AWSDecodableShape { + /// The type of cluster this version is for. + public let clusterType: String? + /// The Kubernetes version for the cluster. + public let clusterVersion: String? + /// Default platform version for this Kubernetes version. + public let defaultPlatformVersion: String? + /// Indicates if this is a default version. + public let defaultVersion: Bool? + /// Date when extended support ends for this version. + public let endOfExtendedSupportDate: Date? + /// Date when standard support ends for this version. + public let endOfStandardSupportDate: Date? + /// The patch version of Kubernetes for this cluster version. + public let kubernetesPatchVersion: String? + /// The release date of this cluster version. + public let releaseDate: Date? 
+ /// Current status of this cluster version. + public let status: ClusterVersionStatus? + + @inlinable + public init(clusterType: String? = nil, clusterVersion: String? = nil, defaultPlatformVersion: String? = nil, defaultVersion: Bool? = nil, endOfExtendedSupportDate: Date? = nil, endOfStandardSupportDate: Date? = nil, kubernetesPatchVersion: String? = nil, releaseDate: Date? = nil, status: ClusterVersionStatus? = nil) { + self.clusterType = clusterType + self.clusterVersion = clusterVersion + self.defaultPlatformVersion = defaultPlatformVersion + self.defaultVersion = defaultVersion + self.endOfExtendedSupportDate = endOfExtendedSupportDate + self.endOfStandardSupportDate = endOfStandardSupportDate + self.kubernetesPatchVersion = kubernetesPatchVersion + self.releaseDate = releaseDate + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case clusterType = "clusterType" + case clusterVersion = "clusterVersion" + case defaultPlatformVersion = "defaultPlatformVersion" + case defaultVersion = "defaultVersion" + case endOfExtendedSupportDate = "endOfExtendedSupportDate" + case endOfStandardSupportDate = "endOfStandardSupportDate" + case kubernetesPatchVersion = "kubernetesPatchVersion" + case releaseDate = "releaseDate" + case status = "status" + } + } + public struct Compatibility: AWSDecodableShape { /// The supported Kubernetes version of the cluster. public let clusterVersion: String? @@ -2395,6 +2466,71 @@ extension EKS { } } + public struct DescribeClusterVersionsRequest: AWSEncodableShape { + /// The type of cluster to filter versions by. + public let clusterType: String? + /// List of specific cluster versions to describe. + public let clusterVersions: [String]? + /// Filter to show only default versions. + public let defaultOnly: Bool? + /// Include all available versions in the response. + public let includeAll: Bool? + /// Maximum number of results to return. + public let maxResults: Int? 
+ /// Pagination token for the next set of results. + public let nextToken: String? + /// Filter versions by their current status. + public let status: ClusterVersionStatus? + + @inlinable + public init(clusterType: String? = nil, clusterVersions: [String]? = nil, defaultOnly: Bool? = nil, includeAll: Bool? = nil, maxResults: Int? = nil, nextToken: String? = nil, status: ClusterVersionStatus? = nil) { + self.clusterType = clusterType + self.clusterVersions = clusterVersions + self.defaultOnly = defaultOnly + self.includeAll = includeAll + self.maxResults = maxResults + self.nextToken = nextToken + self.status = status + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.clusterType, key: "clusterType") + request.encodeQuery(self.clusterVersions, key: "clusterVersions") + request.encodeQuery(self.defaultOnly, key: "defaultOnly") + request.encodeQuery(self.includeAll, key: "includeAll") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.status, key: "status") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct DescribeClusterVersionsResponse: AWSDecodableShape { + /// List of cluster version information objects. + public let clusterVersions: [ClusterVersionInformation]? + /// Pagination token for the next set of results. + public let nextToken: String? + + @inlinable + public init(clusterVersions: [ClusterVersionInformation]? = nil, nextToken: String? 
= nil) { + self.clusterVersions = clusterVersions + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case clusterVersions = "clusterVersions" + case nextToken = "nextToken" + } + } + public struct DescribeEksAnywhereSubscriptionRequest: AWSEncodableShape { /// The ID of the subscription. public let id: String @@ -3065,15 +3201,19 @@ extension EKS { } public struct InsightCategorySpecificSummary: AWSDecodableShape { + /// A list of AddonCompatibilityDetail objects for Amazon EKS add-ons. + public let addonCompatibilityDetails: [AddonCompatibilityDetail]? /// The summary information about deprecated resource usage for an insight check in the UPGRADE_READINESS category. public let deprecationDetails: [DeprecationDetail]? @inlinable - public init(deprecationDetails: [DeprecationDetail]? = nil) { + public init(addonCompatibilityDetails: [AddonCompatibilityDetail]? = nil, deprecationDetails: [DeprecationDetail]? = nil) { + self.addonCompatibilityDetails = addonCompatibilityDetails self.deprecationDetails = deprecationDetails } private enum CodingKeys: String, CodingKey { + case addonCompatibilityDetails = "addonCompatibilityDetails" case deprecationDetails = "deprecationDetails" } } diff --git a/Sources/Soto/Services/Glue/Glue_api.swift b/Sources/Soto/Services/Glue/Glue_api.swift index 1ac60a7196..442e29a8e1 100644 --- a/Sources/Soto/Services/Glue/Glue_api.swift +++ b/Sources/Soto/Services/Glue/Glue_api.swift @@ -81,12 +81,48 @@ public struct Glue: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.dualstack]: .init(endpoints: [ + "af-south-1": "glue.af-south-1.api.aws", + "ap-east-1": "glue.ap-east-1.api.aws", + "ap-northeast-1": "glue.ap-northeast-1.api.aws", + "ap-northeast-2": "glue.ap-northeast-2.api.aws", + "ap-northeast-3": "glue.ap-northeast-3.api.aws", + "ap-south-1": "glue.ap-south-1.api.aws", + "ap-south-2": "glue.ap-south-2.api.aws", + 
"ap-southeast-1": "glue.ap-southeast-1.api.aws", + "ap-southeast-2": "glue.ap-southeast-2.api.aws", + "ap-southeast-3": "glue.ap-southeast-3.api.aws", + "ap-southeast-4": "glue.ap-southeast-4.api.aws", + "ap-southeast-5": "glue.ap-southeast-5.api.aws", + "ca-central-1": "glue.ca-central-1.api.aws", + "ca-west-1": "glue.ca-west-1.api.aws", + "cn-north-1": "glue.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "glue.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "glue.eu-central-1.api.aws", + "eu-central-2": "glue.eu-central-2.api.aws", + "eu-north-1": "glue.eu-north-1.api.aws", + "eu-south-1": "glue.eu-south-1.api.aws", + "eu-south-2": "glue.eu-south-2.api.aws", + "eu-west-1": "glue.eu-west-1.api.aws", + "eu-west-2": "glue.eu-west-2.api.aws", + "eu-west-3": "glue.eu-west-3.api.aws", + "il-central-1": "glue.il-central-1.api.aws", + "me-central-1": "glue.me-central-1.api.aws", + "me-south-1": "glue.me-south-1.api.aws", + "sa-east-1": "glue.sa-east-1.api.aws", + "us-east-1": "glue.us-east-1.api.aws", + "us-east-2": "glue.us-east-2.api.aws", "us-gov-east-1": "glue.us-gov-east-1.api.aws", - "us-gov-west-1": "glue.us-gov-west-1.api.aws" + "us-gov-west-1": "glue.us-gov-west-1.api.aws", + "us-west-1": "glue.us-west-1.api.aws", + "us-west-2": "glue.us-west-2.api.aws" ]), [.dualstack, .fips]: .init(endpoints: [ + "us-east-1": "glue-fips.us-east-1.api.aws", + "us-east-2": "glue-fips.us-east-2.api.aws", "us-gov-east-1": "glue-fips.us-gov-east-1.api.aws", - "us-gov-west-1": "glue-fips.us-gov-west-1.api.aws" + "us-gov-west-1": "glue-fips.us-gov-west-1.api.aws", + "us-west-1": "glue-fips.us-west-1.api.aws", + "us-west-2": "glue-fips.us-west-2.api.aws" ]), [.fips]: .init(endpoints: [ "us-east-1": "glue-fips.us-east-1.amazonaws.com", @@ -3463,13 +3499,15 @@ public struct Glue: AWSService { /// Retrieves all catalogs defined in a catalog in the Glue Data Catalog. 
For a Redshift-federated catalog use case, this operation returns the list of catalogs mapped to Redshift databases in the Redshift namespace catalog. /// /// Parameters: + /// - includeRoot: Whether to list the default catalog in the account and region in the response. Defaults to false. When true and ParentCatalogId = NULL | Amazon Web Services Account ID, all catalogs and the default catalog are enumerated in the response. When the ParentCatalogId is not equal to null, and this attribute is passed as false or true, an InvalidInputException is thrown. /// - maxResults: The maximum number of catalogs to return in one response. /// - nextToken: A continuation token, if this is a continuation call. /// - parentCatalogId: The ID of the parent catalog in which the catalog resides. If none is provided, the Amazon Web Services Account Number is used by default. - /// - recursive: When specified as true, iterates through the account and returns all catalog resources (including top-level resources and child resources) + /// - recursive: Whether to list all catalogs across the catalog hierarchy, starting from the ParentCatalogId. Defaults to false . When true, all catalog objects in the ParentCatalogID hierarchy are enumerated in the response. /// - logger: Logger use during operation @inlinable public func getCatalogs( + includeRoot: Bool? = nil, maxResults: Int? = nil, nextToken: String? = nil, parentCatalogId: String? = nil, @@ -3477,6 +3515,7 @@ public struct Glue: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> GetCatalogsResponse { let input = GetCatalogsRequest( + includeRoot: includeRoot, maxResults: maxResults, nextToken: nextToken, parentCatalogId: parentCatalogId, @@ -7777,7 +7816,7 @@ public struct Glue: AWSService { /// - notificationProperty: Specifies configuration properties of a job run notification. /// - numberOfWorkers: The number of workers of a defined workerType that are allocated when a job runs. 
/// - securityConfiguration: The name of the SecurityConfiguration structure to be used with this job run. - /// - timeout: The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. + /// - timeout: The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception. When the value is left blank, the timeout is defaulted to 2880 minutes. Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. /// - workerType: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. 
For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. /// - logger: Logger use during operation @inlinable diff --git a/Sources/Soto/Services/Glue/Glue_shapes.swift b/Sources/Soto/Services/Glue/Glue_shapes.swift index 3af22a0d2f..ed6cf15c72 100644 --- a/Sources/Soto/Services/Glue/Glue_shapes.swift +++ b/Sources/Soto/Services/Glue/Glue_shapes.swift @@ -11371,17 +11371,20 @@ extension Glue { } public struct GetCatalogsRequest: AWSEncodableShape { + /// Whether to list the default catalog in the account and region in the response. Defaults to false. 
When true and ParentCatalogId = NULL | Amazon Web Services Account ID, all catalogs and the default catalog are enumerated in the response. When the ParentCatalogId is not equal to null, and this attribute is passed as false or true, an InvalidInputException is thrown. + public let includeRoot: Bool? /// The maximum number of catalogs to return in one response. public let maxResults: Int? /// A continuation token, if this is a continuation call. public let nextToken: String? /// The ID of the parent catalog in which the catalog resides. If none is provided, the Amazon Web Services Account Number is used by default. public let parentCatalogId: String? - /// When specified as true, iterates through the account and returns all catalog resources (including top-level resources and child resources) + /// Whether to list all catalogs across the catalog hierarchy, starting from the ParentCatalogId. Defaults to false . When true, all catalog objects in the ParentCatalogID hierarchy are enumerated in the response. public let recursive: Bool? @inlinable - public init(maxResults: Int? = nil, nextToken: String? = nil, parentCatalogId: String? = nil, recursive: Bool? = nil) { + public init(includeRoot: Bool? = nil, maxResults: Int? = nil, nextToken: String? = nil, parentCatalogId: String? = nil, recursive: Bool? = nil) { + self.includeRoot = includeRoot self.maxResults = maxResults self.nextToken = nextToken self.parentCatalogId = parentCatalogId @@ -11397,6 +11400,7 @@ extension Glue { } private enum CodingKeys: String, CodingKey { + case includeRoot = "IncludeRoot" case maxResults = "MaxResults" case nextToken = "NextToken" case parentCatalogId = "ParentCatalogId" @@ -16483,7 +16487,7 @@ extension Glue { public let startedOn: Date? /// This field holds details that pertain to the state of a job run. The field is nullable. For example, when a job run is in a WAITING state as a result of job run queuing, the field has the reason why the job run is in that state. 
public let stateDetail: String? - /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. + /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception. When the value is left blank, the timeout is defaulted to 2880 minutes. Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. public let timeout: Int? /// The name of the trigger that started this job run. public let triggerName: String? @@ -23610,7 +23614,7 @@ extension Glue { public let numberOfWorkers: Int? /// The name of the SecurityConfiguration structure to be used with this job run. public let securityConfiguration: String? - /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. + /// The JobRun timeout in minutes. 
This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception. When the value is left blank, the timeout is defaulted to 2880 minutes. Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day. public let timeout: Int? /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). 
For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? diff --git a/Sources/Soto/Services/IoT/IoT_api.swift b/Sources/Soto/Services/IoT/IoT_api.swift index 8739a2b6ef..5566434ab5 100644 --- a/Sources/Soto/Services/IoT/IoT_api.swift +++ b/Sources/Soto/Services/IoT/IoT_api.swift @@ -890,7 +890,7 @@ public struct IoT: AWSService { /// - mandatoryParameters: A list of parameters that are required by the StartCommandExecution API. These parameters need to be specified only when using the AWS-IoT-FleetWise namespace. You can either specify them here or when running the command using the StartCommandExecution API. /// - namespace: The namespace of the command. The MQTT reserved topics and validations will be used for command executions according to the namespace setting. /// - payload: The payload object for the command. You must specify this information when using the AWS-IoT namespace. You can upload a static payload file from your local storage that contains the instructions for the device to process. The payload file can use any format. 
To make sure that the device correctly interprets the payload, we recommend you to specify the payload content type. - /// - roleArn: The IAM role that allows access to create the command. + /// - roleArn: The IAM role that you must provide when using the AWS-IoT-FleetWise namespace. The role grants IoT Device Management the permission to access IoT FleetWise resources for generating the payload for the command. This field is not required when you use the AWS-IoT namespace. /// - tags: Name-value pairs that are used as metadata to manage a command. /// - logger: Logger use during operation @inlinable @@ -5024,6 +5024,35 @@ public struct IoT: AWSService { return try await self.getStatistics(input, logger: logger) } + /// Retrieves the live connectivity status per device. + @Sendable + @inlinable + public func getThingConnectivityData(_ input: GetThingConnectivityDataRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetThingConnectivityDataResponse { + try await self.client.execute( + operation: "GetThingConnectivityData", + path: "/things/{thingName}/connectivity-data", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves the live connectivity status per device. + /// + /// Parameters: + /// - thingName: The name of your IoT thing. + /// - logger: Logger use during operation + @inlinable + public func getThingConnectivityData( + thingName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetThingConnectivityDataResponse { + let input = GetThingConnectivityDataRequest( + thingName: thingName + ) + return try await self.getThingConnectivityData(input, logger: logger) + } + /// Gets information about the rule. Requires permission to access the GetTopicRule action. @Sendable @inlinable @@ -5632,7 +5661,7 @@ public struct IoT: AWSService { return try await self.listCertificatesByCA(input, logger: logger) } - /// List all command executions. 
You must provide only the startedTimeFilter or the completedTimeFilter information. If you provide both time filters, the API will generate an error. You can use this information to find command executions that started within a specific timeframe. + /// List all command executions. You must provide only the startedTimeFilter or the completedTimeFilter information. If you provide both time filters, the API will generate an error. You can use this information to retrieve a list of command executions within a specific timeframe. You must provide only the commandArn or the thingArn information depending on whether you want to list executions for a specific command or an IoT thing. If you provide both fields, the API will generate an error. For more information about considerations for using this API, see List command executions in your account (CLI). @Sendable @inlinable public func listCommandExecutions(_ input: ListCommandExecutionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCommandExecutionsResponse { @@ -5645,7 +5674,7 @@ public struct IoT: AWSService { logger: logger ) } - /// List all command executions. You must provide only the startedTimeFilter or the completedTimeFilter information. If you provide both time filters, the API will generate an error. You can use this information to find command executions that started within a specific timeframe. + /// List all command executions. You must provide only the startedTimeFilter or the completedTimeFilter information. If you provide both time filters, the API will generate an error. You can use this information to retrieve a list of command executions within a specific timeframe. You must provide only the commandArn or the thingArn information depending on whether you want to list executions for a specific command or an IoT thing. If you provide both fields, the API will generate an error. 
For more information about considerations for using this API, see List command executions in your account (CLI). /// /// Parameters: /// - commandArn: The Amazon Resource Number (ARN) of the command. You can use this information to list all command executions for a particular command. diff --git a/Sources/Soto/Services/IoT/IoT_shapes.swift b/Sources/Soto/Services/IoT/IoT_shapes.swift index bb321b7b20..6db355bce8 100644 --- a/Sources/Soto/Services/IoT/IoT_shapes.swift +++ b/Sources/Soto/Services/IoT/IoT_shapes.swift @@ -308,6 +308,24 @@ extension IoT { public var description: String { return self.rawValue } } + public enum DisconnectReasonValue: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case authError = "AUTH_ERROR" + case clientError = "CLIENT_ERROR" + case clientInitiatedDisconnect = "CLIENT_INITIATED_DISCONNECT" + case connectionLost = "CONNECTION_LOST" + case customauthTtlExpiration = "CUSTOMAUTH_TTL_EXPIRATION" + case duplicateClientid = "DUPLICATE_CLIENTID" + case forbiddenAccess = "FORBIDDEN_ACCESS" + case mqttKeepAliveTimeout = "MQTT_KEEP_ALIVE_TIMEOUT" + case none = "NONE" + case serverError = "SERVER_ERROR" + case serverInitiatedDisconnect = "SERVER_INITIATED_DISCONNECT" + case throttled = "THROTTLED" + case unknown = "UNKNOWN" + case websocketTtlExpiration = "WEBSOCKET_TTL_EXPIRATION" + public var description: String { return self.rawValue } + } + public enum DomainConfigurationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" @@ -3457,7 +3475,7 @@ extension IoT { public let namespace: CommandNamespace? /// The payload object for the command. You must specify this information when using the AWS-IoT namespace. You can upload a static payload file from your local storage that contains the instructions for the device to process. The payload file can use any format. 
To make sure that the device correctly interprets the payload, we recommend you to specify the payload content type. public let payload: CommandPayload? - /// The IAM role that allows access to create the command. + /// The IAM role that you must provide when using the AWS-IoT-FleetWise namespace. The role grants IoT Device Management the permission to access IoT FleetWise resources for generating the payload for the command. This field is not required when you use the AWS-IoT namespace. public let roleArn: String? /// Name-value pairs that are used as metadata to manage a command. public let tags: [Tag]? @@ -9593,7 +9611,7 @@ extension IoT { public let statusReason: StatusReason? /// The Amazon Resource Number (ARN) of the device on which the command execution is being performed. public let targetArn: String? - /// The time to live (TTL) parameter for the GetCommandExecution API. + /// The time to live (TTL) parameter that indicates the duration for which executions will be retained in your account. The default value is six months. public let timeToLive: Date? @inlinable @@ -9677,7 +9695,7 @@ extension IoT { public let payload: CommandPayload? /// Indicates whether the command is being deleted. public let pendingDeletion: Bool? - /// The IAM role that allows access to retrieve information about the command. + /// The IAM role that you provided when creating the command with AWS-IoT-FleetWise as the namespace. public let roleArn: String? @inlinable @@ -10309,6 +10327,56 @@ extension IoT { } } + public struct GetThingConnectivityDataRequest: AWSEncodableShape { + /// The name of your IoT thing. + public let thingName: String + + @inlinable + public init(thingName: String) { + self.thingName = thingName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.thingName, key: "thingName") + } + + public func validate(name: String) throws { + try self.validate(self.thingName, name: "thingName", parent: name, max: 128) + try self.validate(self.thingName, name: "thingName", parent: name, min: 1) + try self.validate(self.thingName, name: "thingName", parent: name, pattern: "^[a-zA-Z0-9:_-]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetThingConnectivityDataResponse: AWSDecodableShape { + /// A Boolean that indicates the connectivity status. + public let connected: Bool? + /// The reason why the client is disconnecting. + public let disconnectReason: DisconnectReasonValue? + /// The name of your IoT thing. + public let thingName: String? + /// The timestamp of when the event occurred. + public let timestamp: Date? + + @inlinable + public init(connected: Bool? = nil, disconnectReason: DisconnectReasonValue? = nil, thingName: String? = nil, timestamp: Date? = nil) { + self.connected = connected + self.disconnectReason = disconnectReason + self.thingName = thingName + self.timestamp = timestamp + } + + private enum CodingKeys: String, CodingKey { + case connected = "connected" + case disconnectReason = "disconnectReason" + case thingName = "thingName" + case timestamp = "timestamp" + } + } + public struct GetTopicRuleDestinationRequest: AWSEncodableShape { /// The ARN of the topic rule destination. 
public let arn: String diff --git a/Sources/Soto/Services/IoTSecureTunneling/IoTSecureTunneling_api.swift b/Sources/Soto/Services/IoTSecureTunneling/IoTSecureTunneling_api.swift index aff7bdb4be..df19be639b 100644 --- a/Sources/Soto/Services/IoTSecureTunneling/IoTSecureTunneling_api.swift +++ b/Sources/Soto/Services/IoTSecureTunneling/IoTSecureTunneling_api.swift @@ -83,22 +83,42 @@ public struct IoTSecureTunneling: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "ap-east-1": "api.iot-tunneling.ap-east-1.api.aws", + "ap-northeast-1": "api.iot-tunneling.ap-northeast-1.api.aws", + "ap-northeast-2": "api.iot-tunneling.ap-northeast-2.api.aws", + "ap-south-1": "api.iot-tunneling.ap-south-1.api.aws", + "ap-southeast-1": "api.iot-tunneling.ap-southeast-1.api.aws", + "ap-southeast-2": "api.iot-tunneling.ap-southeast-2.api.aws", + "ca-central-1": "api.iot-tunneling.ca-central-1.api.aws", + "cn-north-1": "api.iot-tunneling.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "api.iot-tunneling.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "api.iot-tunneling.eu-central-1.api.aws", + "eu-north-1": "api.iot-tunneling.eu-north-1.api.aws", + "eu-west-1": "api.iot-tunneling.eu-west-1.api.aws", + "eu-west-2": "api.iot-tunneling.eu-west-2.api.aws", + "eu-west-3": "api.iot-tunneling.eu-west-3.api.aws", + "me-central-1": "api.iot-tunneling.me-central-1.api.aws", + "me-south-1": "api.iot-tunneling.me-south-1.api.aws", + "sa-east-1": "api.iot-tunneling.sa-east-1.api.aws", + "us-east-1": "api.iot-tunneling.us-east-1.api.aws", + "us-east-2": "api.iot-tunneling.us-east-2.api.aws", + "us-gov-east-1": "api.iot-tunneling.us-gov-east-1.api.aws", + "us-gov-west-1": "api.iot-tunneling.us-gov-west-1.api.aws", + "us-west-1": "api.iot-tunneling.us-west-1.api.aws", + "us-west-2": "api.iot-tunneling.us-west-2.api.aws" + ]), + [.dualstack, .fips]: 
.init(endpoints: [ + "ca-central-1": "api.iot-tunneling-fips.ca-central-1.api.aws", + "us-east-1": "api.iot-tunneling-fips.us-east-1.api.aws", + "us-east-2": "api.iot-tunneling-fips.us-east-2.api.aws", + "us-gov-east-1": "api.iot-tunneling-fips.us-gov-east-1.api.aws", + "us-gov-west-1": "api.iot-tunneling-fips.us-gov-west-1.api.aws", + "us-west-1": "api.iot-tunneling-fips.us-west-1.api.aws", + "us-west-2": "api.iot-tunneling-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ - "ap-east-1": "api.tunneling.iot-fips.ap-east-1.amazonaws.com", - "ap-northeast-1": "api.tunneling.iot-fips.ap-northeast-1.amazonaws.com", - "ap-northeast-2": "api.tunneling.iot-fips.ap-northeast-2.amazonaws.com", - "ap-south-1": "api.tunneling.iot-fips.ap-south-1.amazonaws.com", - "ap-southeast-1": "api.tunneling.iot-fips.ap-southeast-1.amazonaws.com", - "ap-southeast-2": "api.tunneling.iot-fips.ap-southeast-2.amazonaws.com", "ca-central-1": "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - "eu-central-1": "api.tunneling.iot-fips.eu-central-1.amazonaws.com", - "eu-north-1": "api.tunneling.iot-fips.eu-north-1.amazonaws.com", - "eu-west-1": "api.tunneling.iot-fips.eu-west-1.amazonaws.com", - "eu-west-2": "api.tunneling.iot-fips.eu-west-2.amazonaws.com", - "eu-west-3": "api.tunneling.iot-fips.eu-west-3.amazonaws.com", - "me-central-1": "api.tunneling.iot-fips.me-central-1.amazonaws.com", - "me-south-1": "api.tunneling.iot-fips.me-south-1.amazonaws.com", - "sa-east-1": "api.tunneling.iot-fips.sa-east-1.amazonaws.com", "us-east-1": "api.tunneling.iot-fips.us-east-1.amazonaws.com", "us-east-2": "api.tunneling.iot-fips.us-east-2.amazonaws.com", "us-gov-east-1": "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", diff --git a/Sources/Soto/Services/MWAA/MWAA_api.swift b/Sources/Soto/Services/MWAA/MWAA_api.swift index 5bf9007015..5e889f5649 100644 --- a/Sources/Soto/Services/MWAA/MWAA_api.swift +++ b/Sources/Soto/Services/MWAA/MWAA_api.swift @@ -127,7 +127,7 @@ public struct MWAA: 
AWSService { /// /// Parameters: /// - airflowConfigurationOptions: A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options. - /// - airflowVersion: The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1. + /// - airflowVersion: The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3. /// - dagS3Path: The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs. /// - endpointManagement: Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to SERVICE, Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to CUSTOMER, you must create, and manage, the VPC endpoints for your VPC. If you choose to create an environment in a shared VPC, you must set this value to CUSTOMER. In a shared VPC deployment, the environment will remain in PENDING status until you create the VPC endpoints. If you do not take action to create the endpoints within 72 hours, the status will change to CREATE_FAILED. You can delete the failed environment and create a new one. /// - environmentClass: The environment class type. Valid values: mw1.micro, mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class. 
@@ -525,7 +525,7 @@ public struct MWAA: AWSService { /// /// Parameters: /// - airflowConfigurationOptions: A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options. - /// - airflowVersion: The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1. + /// - airflowVersion: The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3. /// - dagS3Path: The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs. /// - environmentClass: The environment class type. Valid values: mw1.micro, mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class. /// - executionRoleArn: The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Amazon MWAA Execution role. 
diff --git a/Sources/Soto/Services/MWAA/MWAA_shapes.swift b/Sources/Soto/Services/MWAA/MWAA_shapes.swift index 6fa180b104..0dfc1fdb26 100644 --- a/Sources/Soto/Services/MWAA/MWAA_shapes.swift +++ b/Sources/Soto/Services/MWAA/MWAA_shapes.swift @@ -157,7 +157,7 @@ extension MWAA { public struct CreateEnvironmentInput: AWSEncodableShape { /// A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options. public let airflowConfigurationOptions: [String: String]? - /// The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1. + /// The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3. public let airflowVersion: String? /// The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs. public let dagS3Path: String @@ -471,7 +471,7 @@ extension MWAA { public struct Environment: AWSDecodableShape { /// A list of key-value pairs containing the Apache Airflow configuration options attached to your environment. For more information, see Apache Airflow configuration options. public let airflowConfigurationOptions: [String: String]? - /// The Apache Airflow version on your environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1. + /// The Apache Airflow version on your environment. 
Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3. public let airflowVersion: String? /// The Amazon Resource Name (ARN) of the Amazon MWAA environment. public let arn: String? @@ -1139,7 +1139,7 @@ extension MWAA { public struct UpdateEnvironmentInput: AWSEncodableShape { /// A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options. public let airflowConfigurationOptions: [String: String]? - /// The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1. + /// The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3. public let airflowVersion: String? /// The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs. public let dagS3Path: String? 
diff --git a/Sources/Soto/Services/Macie2/Macie2_api.swift b/Sources/Soto/Services/Macie2/Macie2_api.swift index 95dba9c310..f37524d56e 100644 --- a/Sources/Soto/Services/Macie2/Macie2_api.swift +++ b/Sources/Soto/Services/Macie2/Macie2_api.swift @@ -79,6 +79,36 @@ public struct Macie2: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "macie2.af-south-1.api.aws", + "ap-east-1": "macie2.ap-east-1.api.aws", + "ap-northeast-1": "macie2.ap-northeast-1.api.aws", + "ap-northeast-2": "macie2.ap-northeast-2.api.aws", + "ap-northeast-3": "macie2.ap-northeast-3.api.aws", + "ap-south-1": "macie2.ap-south-1.api.aws", + "ap-southeast-1": "macie2.ap-southeast-1.api.aws", + "ap-southeast-2": "macie2.ap-southeast-2.api.aws", + "ca-central-1": "macie2.ca-central-1.api.aws", + "eu-central-1": "macie2.eu-central-1.api.aws", + "eu-north-1": "macie2.eu-north-1.api.aws", + "eu-south-1": "macie2.eu-south-1.api.aws", + "eu-west-1": "macie2.eu-west-1.api.aws", + "eu-west-2": "macie2.eu-west-2.api.aws", + "eu-west-3": "macie2.eu-west-3.api.aws", + "il-central-1": "macie2.il-central-1.api.aws", + "me-south-1": "macie2.me-south-1.api.aws", + "sa-east-1": "macie2.sa-east-1.api.aws", + "us-east-1": "macie2.us-east-1.api.aws", + "us-east-2": "macie2.us-east-2.api.aws", + "us-west-1": "macie2.us-west-1.api.aws", + "us-west-2": "macie2.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "us-east-1": "macie2-fips.us-east-1.api.aws", + "us-east-2": "macie2-fips.us-east-2.api.aws", + "us-west-1": "macie2-fips.us-west-1.api.aws", + "us-west-2": "macie2-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ "us-east-1": "macie2-fips.us-east-1.amazonaws.com", "us-east-2": "macie2-fips.us-east-2.amazonaws.com", @@ -1732,7 +1762,7 @@ public struct Macie2: AWSService { return try await self.listClassificationScopes(input, logger: logger) } - /// 
Retrieves a subset of information about all the custom data identifiers for an account. + /// Retrieves a subset of information about the custom data identifiers for an account. @Sendable @inlinable public func listCustomDataIdentifiers(_ input: ListCustomDataIdentifiersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCustomDataIdentifiersResponse { @@ -1745,7 +1775,7 @@ public struct Macie2: AWSService { logger: logger ) } - /// Retrieves a subset of information about all the custom data identifiers for an account. + /// Retrieves a subset of information about the custom data identifiers for an account. /// /// Parameters: /// - maxResults: The maximum number of items to include in each page of the response. @@ -2151,7 +2181,7 @@ public struct Macie2: AWSService { return try await self.putFindingsPublicationConfiguration(input, logger: logger) } - /// Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes. + /// Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes for an account. @Sendable @inlinable public func searchResources(_ input: SearchResourcesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchResourcesResponse { @@ -2164,7 +2194,7 @@ public struct Macie2: AWSService { logger: logger ) } - /// Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes. + /// Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes for an account. /// /// Parameters: /// - bucketCriteria: The filter conditions that determine which S3 buckets to include or exclude from the query results. 
@@ -2617,7 +2647,7 @@ public struct Macie2: AWSService { /// /// Parameters: /// - resourceArn: The Amazon Resource Name (ARN) of the S3 bucket that the request applies to. - /// - suppressDataIdentifiers: An array of objects, one for each custom data identifier or managed data identifier that detected the type of sensitive data to start excluding or including in the bucket's score. To start including all sensitive data types in the score, don't specify any values for this array. + /// - suppressDataIdentifiers: An array of objects, one for each custom data identifier or managed data identifier that detected a type of sensitive data to exclude from the bucket's score. To include all sensitive data types in the score, don't specify any values for this array. /// - logger: Logger use during operation @inlinable public func updateResourceProfileDetections( diff --git a/Sources/Soto/Services/Macie2/Macie2_shapes.swift b/Sources/Soto/Services/Macie2/Macie2_shapes.swift index 9b345c01fa..d8d07deec2 100644 --- a/Sources/Soto/Services/Macie2/Macie2_shapes.swift +++ b/Sources/Soto/Services/Macie2/Macie2_shapes.swift @@ -90,6 +90,7 @@ extension Macie2 { public enum BucketMetadataErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case accessDenied = "ACCESS_DENIED" + case bucketCountExceedsQuota = "BUCKET_COUNT_EXCEEDS_QUOTA" public var description: String { return self.rawValue } } @@ -665,7 +666,7 @@ extension Macie2 { public struct ApiCallDetails: AWSDecodableShape { /// The name of the operation that was invoked most recently and produced the finding. public let api: String? - /// The URL of the Amazon Web Service that provides the operation, for example: s3.amazonaws.com. + /// The URL of the Amazon Web Services service that provides the operation, for example: s3.amazonaws.com. public let apiServiceName: String? /// The first date and time, in UTC and extended ISO 8601 format, when any operation was invoked and produced the finding. 
@OptionalCustomCoding @@ -934,7 +935,7 @@ extension Macie2 { public let publiclyReadable: Int64? /// The total number of buckets that allow the general public to have write access to the bucket. public let publiclyWritable: Int64? - /// The total number of buckets that Amazon Macie wasn't able to evaluate permissions settings for. Macie can't determine whether these buckets are publicly accessible. + /// The total number of buckets that Amazon Macie wasn't able to evaluate permissions settings for. For example, the buckets' policies or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether the buckets are publicly accessible. public let unknown: Int64? @inlinable @@ -960,7 +961,7 @@ extension Macie2 { public let s3Managed: Int64? /// The total number of buckets that don't specify default server-side encryption behavior for new objects. Default encryption settings aren't configured for these buckets. public let unencrypted: Int64? - /// The total number of buckets that Amazon Macie doesn't have current encryption metadata for. Macie can't provide current data about the default encryption settings for these buckets. + /// The total number of buckets that Amazon Macie doesn't have current encryption metadata for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the default encryption settings for the buckets. public let unknown: Int64? @inlinable @@ -986,7 +987,7 @@ extension Macie2 { public let `internal`: Int64? /// The total number of buckets that aren't shared with other Amazon Web Services accounts, Amazon CloudFront OAIs, or CloudFront OACs. public let notShared: Int64? - /// The total number of buckets that Amazon Macie wasn't able to evaluate shared access settings for. Macie can't determine whether these buckets are shared with other Amazon Web Services accounts, Amazon CloudFront OAIs, or CloudFront OACs. 
+ /// The total number of buckets that Amazon Macie wasn't able to evaluate shared access settings for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether the buckets are shared with other Amazon Web Services accounts, Amazon CloudFront OAIs, or CloudFront OACs. public let unknown: Int64? @inlinable @@ -1010,7 +1011,7 @@ extension Macie2 { public let allowsUnencryptedObjectUploads: Int64? /// The total number of buckets whose bucket policies require server-side encryption of new objects. PutObject requests for these buckets must include a valid server-side encryption header: the x-amz-server-side-encryption header with a value of AES256 or aws:kms, or the x-amz-server-side-encryption-customer-algorithm header with a value of AES256. public let deniesUnencryptedObjectUploads: Int64? - /// The total number of buckets that Amazon Macie wasn't able to evaluate server-side encryption requirements for. Macie can't determine whether the bucket policies for these buckets require server-side encryption of new objects. + /// The total number of buckets that Amazon Macie wasn't able to evaluate server-side encryption requirements for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether bucket policies for the buckets require server-side encryption of new objects. public let unknown: Int64? @inlinable @@ -1105,13 +1106,13 @@ extension Macie2 { public let classifiableObjectCount: Int64? /// The total storage size, in bytes, of the objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format. If versioning is enabled for the bucket, Macie calculates this value based on the size of the latest version of each applicable object in the bucket. 
This value doesn't reflect the storage size of all versions of each applicable object in the bucket. public let classifiableSizeInBytes: Int64? - /// The error code for an error that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. If this value is ACCESS_DENIED, Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. If this value is null, Macie was able to retrieve and process the information. + /// The code for an error or issue that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. Possible values are: ACCESS_DENIED - Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. BUCKET_COUNT_EXCEEDS_QUOTA - Retrieving and processing the information would exceed the quota for the number of buckets that Macie monitors for an account (10,000). If this value is null, Macie was able to retrieve and process the information. public let errorCode: BucketMetadataErrorCode? - /// A brief description of the error (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information. + /// A brief description of the error or issue (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information. public let errorMessage: String? /// Specifies whether any one-time or recurring classification jobs are configured to analyze objects in the bucket, and, if so, the details of the job that ran most recently. public let jobDetails: JobDetails? 
- /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account. + /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if this analysis hasn't occurred. @OptionalCustomCoding public var lastAutomatedDiscoveryTime: Date? /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently retrieved bucket or object metadata from Amazon S3 for the bucket. @@ -1127,7 +1128,7 @@ extension Macie2 { public let region: String? /// Specifies whether the bucket is configured to replicate one or more objects to buckets for other Amazon Web Services accounts and, if so, which accounts. public let replicationDetails: ReplicationDetails? - /// The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses. + /// The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).If automated sensitive data discovery has never been enabled for your account or it's been disabled for your organization or standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it's been excluded from recent analyses. public let sensitivityScore: Int? /// The default server-side encryption settings for the bucket. public let serverSideEncryption: BucketServerSideEncryption? 
@@ -2326,7 +2327,7 @@ extension Macie2 { public let id: String? /// The name of the custom data identifier or managed data identifier that detected the sensitive data. For a managed data identifier, this value is the same as the unique identifier (id). public let name: String? - /// Specifies whether occurrences of this type of sensitive data are excluded (true) or included (false) in the bucket's sensitivity score. + /// Specifies whether occurrences of this type of sensitive data are excluded (true) or included (false) in the bucket's sensitivity score, if the score is calculated by Amazon Macie. public let suppressed: Bool? /// The type of data identifier that detected the sensitive data. Possible values are: CUSTOM, for a custom data identifier; and, MANAGED, for a managed data identifier. public let type: DataIdentifierType? @@ -2851,7 +2852,7 @@ extension Macie2 { public let bucketCountByObjectEncryptionRequirement: BucketCountPolicyAllowsUnencryptedObjectUploads? /// The total number of buckets that are or aren't shared with other Amazon Web Services accounts, Amazon CloudFront origin access identities (OAIs), or CloudFront origin access controls (OACs). public let bucketCountBySharedAccessType: BucketCountBySharedAccessType? - /// The aggregated sensitive data discovery statistics for the buckets. If automated sensitive data discovery is currently disabled for your account, the value for each statistic is 0. + /// The aggregated sensitive data discovery statistics for the buckets. If automated sensitive data discovery is currently disabled for your account, the value for most statistics is 0. public let bucketStatisticsBySensitivity: BucketStatisticsBySensitivity? /// The total number of objects that Amazon Macie can analyze in the buckets. These objects use a supported storage class and have a file name extension for a supported file or storage format. public let classifiableObjectCount: Int64? 
@@ -3418,7 +3419,7 @@ extension Macie2 { public struct GetSensitiveDataOccurrencesAvailabilityResponse: AWSDecodableShape { /// Specifies whether occurrences of sensitive data can be retrieved for the finding. Possible values are: AVAILABLE, the sensitive data can be retrieved; and, UNAVAILABLE, the sensitive data can't be retrieved. If this value is UNAVAILABLE, the reasons array indicates why the data can't be retrieved. public let code: AvailabilityCode? - /// Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are: ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie. INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve. INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve. MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data. MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. 
Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account. OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file. OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with an KMS key that's currently disabled. RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve. ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data. UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding. UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data. This value is null if sensitive data can be retrieved for the finding. + /// Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are: ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie. INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. 
Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve. INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve. MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data. MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account. OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file. OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with a KMS key that isn’t available. For example, the key is disabled, is scheduled for deletion, or was deleted. RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve. 
ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data. UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding. UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data. This value is null if sensitive data can be retrieved for the finding. public let reasons: [UnavailabilityReasonCode]? @inlinable @@ -4673,20 +4674,20 @@ extension Macie2 { public let classifiableObjectCount: Int64? /// The total storage size, in bytes, of the objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format. If versioning is enabled for the bucket, Macie calculates this value based on the size of the latest version of each applicable object in the bucket. This value doesn't reflect the storage size of all versions of each applicable object in the bucket. public let classifiableSizeInBytes: Int64? - /// The error code for an error that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. If this value is ACCESS_DENIED, Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. If this value is null, Macie was able to retrieve and process the information. + /// The code for an error or issue that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. Possible values are: ACCESS_DENIED - Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. 
BUCKET_COUNT_EXCEEDS_QUOTA - Retrieving and processing the information would exceed the quota for the number of buckets that Macie monitors for an account (10,000). If this value is null, Macie was able to retrieve and process the information. public let errorCode: BucketMetadataErrorCode? - /// A brief description of the error (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information. + /// A brief description of the error or issue (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information. public let errorMessage: String? /// Specifies whether any one-time or recurring classification jobs are configured to analyze objects in the bucket, and, if so, the details of the job that ran most recently. public let jobDetails: JobDetails? - /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account. + /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if this analysis hasn't occurred. @OptionalCustomCoding public var lastAutomatedDiscoveryTime: Date? /// The total number of objects in the bucket. public let objectCount: Int64? /// The total number of objects in the bucket, grouped by server-side encryption type. This includes a grouping that reports the total number of objects that aren't encrypted or use client-side encryption. public let objectCountByEncryptionType: ObjectCountByEncryptionType? 
- /// The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses. + /// The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).If automated sensitive data discovery has never been enabled for your account or it's been disabled for your organization or standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it's been excluded from recent analyses. public let sensitivityScore: Int? /// The total storage size, in bytes, of the bucket. If versioning is enabled for the bucket, Amazon Macie calculates this value based on the size of the latest version of each object in the bucket. This value doesn't reflect the storage size of all versions of each object in the bucket. public let sizeInBytes: Int64? @@ -4738,7 +4739,7 @@ extension Macie2 { } public struct MatchingResource: AWSDecodableShape { - /// The details of an S3 bucket that Amazon Macie monitors and analyzes. + /// The details of an S3 bucket that Amazon Macie monitors and analyzes for your account. public let matchingBucket: MatchingBucket? @inlinable @@ -5304,7 +5305,7 @@ extension Macie2 { } public struct S3ClassificationScopeExclusionUpdate: AWSEncodableShape { - /// Depending on the value specified for the update operation (ClassificationScopeUpdateOperation), an array of strings that: lists the names of buckets to add or remove from the list, or specifies a new set of bucket names that overwrites all existing names in the list. Each string must be the full name of an S3 bucket. Values are case sensitive. 
+ /// Depending on the value specified for the update operation (ClassificationScopeUpdateOperation), an array of strings that: lists the names of buckets to add or remove from the list, or specifies a new set of bucket names that overwrites all existing names in the list. Each string must be the full name of an existing S3 bucket. Values are case sensitive. public let bucketNames: [String]? /// Specifies how to apply the changes to the exclusion list. Valid values are: ADD - Append the specified bucket names to the current list. REMOVE - Remove the specified bucket names from the current list. REPLACE - Overwrite the current list with the specified list of bucket names. If you specify this value, Amazon Macie removes all existing names from the list and adds all the specified names to the list. public let operation: ClassificationScopeUpdateOperation? @@ -6007,7 +6008,7 @@ extension Macie2 { } public struct SuppressDataIdentifier: AWSEncodableShape { - /// The unique identifier for the custom data identifier or managed data identifier that detected the type of sensitive data to exclude or include in the score. + /// The unique identifier for the custom data identifier or managed data identifier that detected the type of sensitive data to exclude from the score. public let id: String? /// The type of data identifier that detected the sensitive data. Possible values are: CUSTOM, for a custom data identifier; and, MANAGED, for a managed data identifier. public let type: DataIdentifierType? @@ -6506,7 +6507,7 @@ extension Macie2 { public struct UpdateResourceProfileDetectionsRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the S3 bucket that the request applies to. public let resourceArn: String? - /// An array of objects, one for each custom data identifier or managed data identifier that detected the type of sensitive data to start excluding or including in the bucket's score. 
To start including all sensitive data types in the score, don't specify any values for this array. + /// An array of objects, one for each custom data identifier or managed data identifier that detected a type of sensitive data to exclude from the bucket's score. To include all sensitive data types in the score, don't specify any values for this array. public let suppressDataIdentifiers: [SuppressDataIdentifier]? @inlinable @@ -6783,7 +6784,7 @@ extension Macie2 { public let assumedRole: AssumedRole? /// If the action was performed using the credentials for another Amazon Web Services account, the details of that account. public let awsAccount: AwsAccount? - /// If the action was performed by an Amazon Web Services account that belongs to an Amazon Web Service, the name of the service. + /// If the action was performed by an Amazon Web Services account that belongs to an Amazon Web Services service, the name of the service. public let awsService: AwsService? /// If the action was performed with temporary security credentials that were obtained using the GetFederationToken operation of the Security Token Service (STS) API, the identifiers, session context, and other details about the identity. public let federatedUser: FederatedUser? 
diff --git a/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift b/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift index 16e841b6a5..261775dee7 100644 --- a/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift +++ b/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift @@ -65,7 +65,6 @@ public struct MediaConvert: AWSService { serviceProtocol: .restjson, apiVersion: "2017-08-29", endpoint: endpoint, - serviceEndpoints: Self.serviceEndpoints, variantEndpoints: Self.variantEndpoints, errorType: MediaConvertErrorType.self, middleware: middleware, @@ -76,14 +75,42 @@ public struct MediaConvert: AWSService { } - /// custom endpoints for regions - static var serviceEndpoints: [String: String] {[ - "cn-northwest-1": "mediaconvert.cn-northwest-1.amazonaws.com.cn" - ]} /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "mediaconvert.af-south-1.api.aws", + "ap-northeast-1": "mediaconvert.ap-northeast-1.api.aws", + "ap-northeast-2": "mediaconvert.ap-northeast-2.api.aws", + "ap-northeast-3": "mediaconvert.ap-northeast-3.api.aws", + "ap-south-1": "mediaconvert.ap-south-1.api.aws", + "ap-southeast-1": "mediaconvert.ap-southeast-1.api.aws", + "ap-southeast-2": "mediaconvert.ap-southeast-2.api.aws", + "ap-southeast-4": "mediaconvert.ap-southeast-4.api.aws", + "ca-central-1": "mediaconvert.ca-central-1.api.aws", + "cn-northwest-1": "mediaconvert.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "mediaconvert.eu-central-1.api.aws", + "eu-north-1": "mediaconvert.eu-north-1.api.aws", + "eu-west-1": "mediaconvert.eu-west-1.api.aws", + "eu-west-2": "mediaconvert.eu-west-2.api.aws", + "eu-west-3": "mediaconvert.eu-west-3.api.aws", + "me-central-1": "mediaconvert.me-central-1.api.aws", + "sa-east-1": "mediaconvert.sa-east-1.api.aws", + "us-east-1": "mediaconvert.us-east-1.api.aws", + "us-east-2": "mediaconvert.us-east-2.api.aws", + 
"us-gov-west-1": "mediaconvert.us-gov-west-1.api.aws", + "us-west-1": "mediaconvert.us-west-1.api.aws", + "us-west-2": "mediaconvert.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "mediaconvert-fips.ca-central-1.api.aws", + "us-east-1": "mediaconvert-fips.us-east-1.api.aws", + "us-east-2": "mediaconvert-fips.us-east-2.api.aws", + "us-gov-west-1": "mediaconvert.us-gov-west-1.api.aws", + "us-west-1": "mediaconvert-fips.us-west-1.api.aws", + "us-west-2": "mediaconvert-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ "ca-central-1": "mediaconvert-fips.ca-central-1.amazonaws.com", "us-east-1": "mediaconvert-fips.us-east-1.amazonaws.com", diff --git a/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift b/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift index f8947f80e8..bd578e8b83 100644 --- a/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift +++ b/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift @@ -2689,6 +2689,12 @@ extension MediaConvert { public var description: String { return self.rawValue } } + public enum RemoveRubyReserveAttributes: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum RenewalType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case autoRenew = "AUTO_RENEW" case expire = "EXPIRE" @@ -2832,6 +2838,12 @@ extension MediaConvert { public var description: String { return self.rawValue } } + public enum TimecodeTrack: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum TimedMetadata: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case none = "NONE" case passthrough = "PASSTHROUGH" @@ -3595,6 
+3607,7 @@ extension MediaConvert { public func validate(name: String) throws { try self.audioNormalizationSettings?.validate(name: "\(name).audioNormalizationSettings") + try self.validate(self.audioSourceName, name: "audioSourceName", parent: name, max: 2048) try self.validate(self.audioType, name: "audioType", parent: name, max: 255) try self.validate(self.audioType, name: "audioType", parent: name, min: 0) try self.codecSettings?.validate(name: "\(name).codecSettings") @@ -4104,6 +4117,8 @@ extension MediaConvert { public let outlineColor: BurninSubtitleOutlineColor? /// Specify the Outline size of the caption text, in pixels. Leave Outline size blank and set Style passthrough to enabled to use the outline size data from your input captions, if present. public let outlineSize: Int? + /// Optionally remove any tts:rubyReserve attributes present in your input, that do not have a tts:ruby attribute in the same element, from your output. Use if your vertical Japanese output captions have alignment issues. To remove ruby reserve attributes when present: Choose Enabled. To not remove any ruby reserve attributes: Keep the default value, Disabled. + public let removeRubyReserveAttributes: RemoveRubyReserveAttributes? /// Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present. public let shadowColor: BurninSubtitleShadowColor? /// Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to Enabled, leave Shadow opacity blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions. @@ -4122,7 +4137,7 @@ extension MediaConvert { public let yPosition: Int? @inlinable - public init(alignment: BurninSubtitleAlignment? 
= nil, applyFontColor: BurninSubtitleApplyFontColor? = nil, backgroundColor: BurninSubtitleBackgroundColor? = nil, backgroundOpacity: Int? = nil, fallbackFont: BurninSubtitleFallbackFont? = nil, fontColor: BurninSubtitleFontColor? = nil, fontFileBold: String? = nil, fontFileBoldItalic: String? = nil, fontFileItalic: String? = nil, fontFileRegular: String? = nil, fontOpacity: Int? = nil, fontResolution: Int? = nil, fontScript: FontScript? = nil, fontSize: Int? = nil, hexFontColor: String? = nil, outlineColor: BurninSubtitleOutlineColor? = nil, outlineSize: Int? = nil, shadowColor: BurninSubtitleShadowColor? = nil, shadowOpacity: Int? = nil, shadowXOffset: Int? = nil, shadowYOffset: Int? = nil, stylePassthrough: BurnInSubtitleStylePassthrough? = nil, teletextSpacing: BurninSubtitleTeletextSpacing? = nil, xPosition: Int? = nil, yPosition: Int? = nil) { + public init(alignment: BurninSubtitleAlignment? = nil, applyFontColor: BurninSubtitleApplyFontColor? = nil, backgroundColor: BurninSubtitleBackgroundColor? = nil, backgroundOpacity: Int? = nil, fallbackFont: BurninSubtitleFallbackFont? = nil, fontColor: BurninSubtitleFontColor? = nil, fontFileBold: String? = nil, fontFileBoldItalic: String? = nil, fontFileItalic: String? = nil, fontFileRegular: String? = nil, fontOpacity: Int? = nil, fontResolution: Int? = nil, fontScript: FontScript? = nil, fontSize: Int? = nil, hexFontColor: String? = nil, outlineColor: BurninSubtitleOutlineColor? = nil, outlineSize: Int? = nil, removeRubyReserveAttributes: RemoveRubyReserveAttributes? = nil, shadowColor: BurninSubtitleShadowColor? = nil, shadowOpacity: Int? = nil, shadowXOffset: Int? = nil, shadowYOffset: Int? = nil, stylePassthrough: BurnInSubtitleStylePassthrough? = nil, teletextSpacing: BurninSubtitleTeletextSpacing? = nil, xPosition: Int? = nil, yPosition: Int? 
= nil) { self.alignment = alignment self.applyFontColor = applyFontColor self.backgroundColor = backgroundColor @@ -4140,6 +4155,7 @@ extension MediaConvert { self.hexFontColor = hexFontColor self.outlineColor = outlineColor self.outlineSize = outlineSize + self.removeRubyReserveAttributes = removeRubyReserveAttributes self.shadowColor = shadowColor self.shadowOpacity = shadowOpacity self.shadowXOffset = shadowXOffset @@ -4197,6 +4213,7 @@ extension MediaConvert { case hexFontColor = "hexFontColor" case outlineColor = "outlineColor" case outlineSize = "outlineSize" + case removeRubyReserveAttributes = "removeRubyReserveAttributes" case shadowColor = "shadowColor" case shadowOpacity = "shadowOpacity" case shadowXOffset = "shadowXOffset" @@ -7842,6 +7859,7 @@ extension MediaConvert { try self.decryptionSettings?.validate(name: "\(name).decryptionSettings") try self.validate(self.dolbyVisionMetadataXml, name: "dolbyVisionMetadataXml", parent: name, min: 14) try self.validate(self.dolbyVisionMetadataXml, name: "dolbyVisionMetadataXml", parent: name, pattern: "^((s3://(.*?)\\.(xml|XML))|(https?://(.*?)\\.(xml|XML)(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$") + try self.validate(self.fileInput, name: "fileInput", parent: name, max: 2048) try self.validate(self.fileInput, name: "fileInput", parent: name, pattern: "^s3://([^\\/]+\\/+)+((([^\\/]*)))|^https?://[^\\/].*[^&]$") try self.validate(self.filterStrength, name: "filterStrength", parent: name, max: 5) try self.validate(self.filterStrength, name: "filterStrength", parent: name, min: 0) @@ -8380,7 +8398,7 @@ extension MediaConvert { public let esam: EsamSettings? /// If your source content has EIA-608 Line 21 Data Services, enable this feature to specify what MediaConvert does with the Extended Data Services (XDS) packets. You can choose to pass through XDS packets, or remove them from the output. For more information about XDS, see EIA-608 Line Data Services, section 9.5.1.5 05h Content Advisory. 
public let extendedDataServices: ExtendedDataServices? - /// Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable "Follow source" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs. + /// Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable "Follow source" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs. public let followSource: Int? /// Use Inputs to define source file used in the transcode job. There can be multiple inputs add in a job. These inputs will be concantenated together to create the output. public let inputs: [Input]? @@ -8534,7 +8552,7 @@ extension MediaConvert { public let esam: EsamSettings? /// If your source content has EIA-608 Line 21 Data Services, enable this feature to specify what MediaConvert does with the Extended Data Services (XDS) packets. You can choose to pass through XDS packets, or remove them from the output. For more information about XDS, see EIA-608 Line Data Services, section 9.5.1.5 05h Content Advisory. public let extendedDataServices: ExtendedDataServices? - /// Specify the input that MediaConvert references for your default output settings. 
MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable "Follow source" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs. + /// Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable "Follow source" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs. public let followSource: Int? /// Use Inputs to define the source file used in the transcode job. There can only be one input in a job template. Using the API, you can include multiple inputs when referencing a job template. public let inputs: [InputTemplate]? 
@@ -10362,6 +10380,7 @@ extension MediaConvert { try $0.validate(name: "\(name).captionDescriptions[]") } try self.containerSettings?.validate(name: "\(name).containerSettings") + try self.validate(self.`extension`, name: "`extension`", parent: name, max: 256) try self.validate(self.nameModifier, name: "nameModifier", parent: name, max: 256) try self.validate(self.nameModifier, name: "nameModifier", parent: name, min: 1) try self.videoDescription?.validate(name: "\(name).videoDescription") @@ -10445,6 +10464,7 @@ extension MediaConvert { public func validate(name: String) throws { try self.automatedEncodingSettings?.validate(name: "\(name).automatedEncodingSettings") + try self.validate(self.name, name: "name", parent: name, max: 2048) try self.outputGroupSettings?.validate(name: "\(name).outputGroupSettings") try self.outputs?.forEach { try $0.validate(name: "\(name).outputs[]") @@ -11909,7 +11929,7 @@ extension MediaConvert { public let colorMetadata: ColorMetadata? /// Use Cropping selection to specify the video area that the service will include in the output video frame. public let crop: Rectangle? - /// Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled. + /// Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion or Timecode track is enabled. public let dropFrameTimecode: DropFrameTimecode? /// Applies only if you set AFD Signaling to Fixed. Use Fixed to specify a four-bit AFD value which the service will write on all frames of this video output. public let fixedAfd: Int? 
@@ -11925,13 +11945,15 @@ extension MediaConvert { public let sharpness: Int? /// Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. To include timecodes in this output, set Timecode insertion to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration. In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration does. public let timecodeInsertion: VideoTimecodeInsertion? + /// To include a timecode track in your MP4 output: Choose Enabled. MediaConvert writes the timecode track in the Null Media Header box (NMHD), without any timecode text formatting information. You can also specify dropframe or non-dropframe timecode under the Drop Frame Timecode setting. To not include a timecode track: Keep the default value, Disabled. + public let timecodeTrack: TimecodeTrack? /// Find additional transcoding features under Preprocessors. Enable the features at each output individually. These features are disabled by default. public let videoPreprocessors: VideoPreprocessor? /// Use Width to define the video resolution width, in pixels, for this output. To use the same resolution as your input: Leave both Width and Height blank. To evenly scale from your input resolution: Leave Width blank and enter a value for Height. For example, if your input is 1920x1080 and you set Height to 720, your output will be 1280x720. public let width: Int? @inlinable - public init(afdSignaling: AfdSignaling? 
= nil, antiAlias: AntiAlias? = nil, codecSettings: VideoCodecSettings? = nil, colorMetadata: ColorMetadata? = nil, crop: Rectangle? = nil, dropFrameTimecode: DropFrameTimecode? = nil, fixedAfd: Int? = nil, height: Int? = nil, position: Rectangle? = nil, respondToAfd: RespondToAfd? = nil, scalingBehavior: ScalingBehavior? = nil, sharpness: Int? = nil, timecodeInsertion: VideoTimecodeInsertion? = nil, videoPreprocessors: VideoPreprocessor? = nil, width: Int? = nil) { + public init(afdSignaling: AfdSignaling? = nil, antiAlias: AntiAlias? = nil, codecSettings: VideoCodecSettings? = nil, colorMetadata: ColorMetadata? = nil, crop: Rectangle? = nil, dropFrameTimecode: DropFrameTimecode? = nil, fixedAfd: Int? = nil, height: Int? = nil, position: Rectangle? = nil, respondToAfd: RespondToAfd? = nil, scalingBehavior: ScalingBehavior? = nil, sharpness: Int? = nil, timecodeInsertion: VideoTimecodeInsertion? = nil, timecodeTrack: TimecodeTrack? = nil, videoPreprocessors: VideoPreprocessor? = nil, width: Int? 
= nil) { self.afdSignaling = afdSignaling self.antiAlias = antiAlias self.codecSettings = codecSettings @@ -11945,6 +11967,7 @@ extension MediaConvert { self.scalingBehavior = scalingBehavior self.sharpness = sharpness self.timecodeInsertion = timecodeInsertion + self.timecodeTrack = timecodeTrack self.videoPreprocessors = videoPreprocessors self.width = width } @@ -11978,6 +12001,7 @@ extension MediaConvert { case scalingBehavior = "scalingBehavior" case sharpness = "sharpness" case timecodeInsertion = "timecodeInsertion" + case timecodeTrack = "timecodeTrack" case videoPreprocessors = "videoPreprocessors" case width = "width" } diff --git a/Sources/Soto/Services/MediaLive/MediaLive_api.swift b/Sources/Soto/Services/MediaLive/MediaLive_api.swift index 0785de2915..a28f25c80d 100644 --- a/Sources/Soto/Services/MediaLive/MediaLive_api.swift +++ b/Sources/Soto/Services/MediaLive/MediaLive_api.swift @@ -331,7 +331,9 @@ public struct MediaLive: AWSService { /// - anywhereSettings: The Elemental Anywhere settings for this channel. /// - cdiInputSpecification: Specification of CDI inputs for this channel /// - channelClass: The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. + /// - channelEngineVersion: The desired engine version for this channel. /// - destinations: + /// - dryRun: /// - encoderSettings: /// - inputAttachments: List of input attachments for channel. /// - inputSpecification: Specification of network and file inputs for this channel @@ -348,7 +350,9 @@ public struct MediaLive: AWSService { anywhereSettings: AnywhereSettings? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, + channelEngineVersion: ChannelEngineVersionRequest? = nil, destinations: [OutputDestination]? = nil, + dryRun: Bool? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? 
= nil, @@ -365,7 +369,9 @@ public struct MediaLive: AWSService { anywhereSettings: anywhereSettings, cdiInputSpecification: cdiInputSpecification, channelClass: channelClass, + channelEngineVersion: channelEngineVersion, destinations: destinations, + dryRun: dryRun, encoderSettings: encoderSettings, inputAttachments: inputAttachments, inputSpecification: inputSpecification, @@ -2899,6 +2905,32 @@ public struct MediaLive: AWSService { return try await self.listTagsForResource(input, logger: logger) } + /// Retrieves an array of all the encoder engine versions that are available in this AWS account. + @Sendable + @inlinable + public func listVersions(_ input: ListVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListVersionsResponse { + try await self.client.execute( + operation: "ListVersions", + path: "/prod/versions", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves an array of all the encoder engine versions that are available in this AWS account. + /// + /// Parameters: + /// - logger: Logger use during operation + @inlinable + public func listVersions( + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListVersionsResponse { + let input = ListVersionsRequest( + ) + return try await self.listVersions(input, logger: logger) + } + /// Purchase an offering and create a reservation. @Sendable @inlinable @@ -3434,8 +3466,10 @@ public struct MediaLive: AWSService { /// /// Parameters: /// - cdiInputSpecification: Specification of CDI inputs for this channel + /// - channelEngineVersion: Channel engine version for this channel /// - channelId: channel ID /// - destinations: A list of output destinations for this channel. + /// - dryRun: /// - encoderSettings: The encoder settings for this channel. 
/// - inputAttachments: /// - inputSpecification: Specification of network and file inputs for this channel @@ -3447,8 +3481,10 @@ public struct MediaLive: AWSService { @inlinable public func updateChannel( cdiInputSpecification: CdiInputSpecification? = nil, + channelEngineVersion: ChannelEngineVersionRequest? = nil, channelId: String, destinations: [OutputDestination]? = nil, + dryRun: Bool? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, @@ -3460,8 +3496,10 @@ public struct MediaLive: AWSService { ) async throws -> UpdateChannelResponse { let input = UpdateChannelRequest( cdiInputSpecification: cdiInputSpecification, + channelEngineVersion: channelEngineVersion, channelId: channelId, destinations: destinations, + dryRun: dryRun, encoderSettings: encoderSettings, inputAttachments: inputAttachments, inputSpecification: inputSpecification, diff --git a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift index e7399b108b..774eae4350 100644 --- a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift +++ b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift @@ -3922,6 +3922,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -3954,11 +3956,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? 
= nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? 
= nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -3981,6 +3984,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -4013,6 +4017,39 @@ extension MediaLive { } } + public struct ChannelEngineVersionRequest: AWSEncodableShape { + /// The build identifier of the engine version to use for this channel. Specify 'DEFAULT' to reset to the default version. + public let version: String? + + @inlinable + public init(version: String? = nil) { + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case version = "version" + } + } + + public struct ChannelEngineVersionResponse: AWSDecodableShape { + /// The UTC time when the version expires. + @OptionalCustomCoding + public var expirationDate: Date? + /// The build identifier for this version of the channel version. + public let version: String? + + @inlinable + public init(expirationDate: Date? = nil, version: String? = nil) { + self.expirationDate = expirationDate + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case expirationDate = "expirationDate" + case version = "version" + } + } + public struct ChannelSummary: AWSDecodableShape { /// AnywhereSettings settings for this channel. public let anywhereSettings: DescribeAnywhereSettings? @@ -4022,6 +4059,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. 
public let channelClass: ChannelClass? + /// The engine version that you requested for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -4047,15 +4086,18 @@ extension MediaLive { public let state: ChannelState? /// A collection of key-value pairs. public let tags: [String: String]? + /// The engine version that the running pipelines are using. + public let usedChannelEngineVersions: [ChannelEngineVersionResponse]? /// Settings for any VPC outputs. public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? 
= nil, usedChannelEngineVersions: [ChannelEngineVersionResponse]? = nil, vpc: VpcOutputSettingsDescription? = nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.id = id @@ -4068,6 +4110,7 @@ extension MediaLive { self.roleArn = roleArn self.state = state self.tags = tags + self.usedChannelEngineVersions = usedChannelEngineVersions self.vpc = vpc } @@ -4076,6 +4119,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case id = "id" @@ -4088,6 +4132,7 @@ extension MediaLive { case roleArn = "roleArn" case state = "state" case tags = "tags" + case usedChannelEngineVersions = "usedChannelEngineVersions" case vpc = "vpc" } } @@ -4475,7 +4520,10 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// The desired engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionRequest? public let destinations: [OutputDestination]? + public let dryRun: Bool? public let encoderSettings: EncoderSettings? /// List of input attachments for channel. public let inputAttachments: [InputAttachment]? @@ -4500,11 +4548,13 @@ extension MediaLive { public let vpc: VpcOutputSettings? @inlinable - public init(anywhereSettings: AnywhereSettings? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, encoderSettings: EncoderSettings? 
= nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceCreateSettings? = nil, name: String? = nil, requestId: String? = CreateChannelRequest.idempotencyToken(), roleArn: String? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettings? = nil) { + public init(anywhereSettings: AnywhereSettings? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionRequest? = nil, destinations: [OutputDestination]? = nil, dryRun: Bool? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceCreateSettings? = nil, name: String? = nil, requestId: String? = CreateChannelRequest.idempotencyToken(), roleArn: String? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettings? = nil) { self.anywhereSettings = anywhereSettings self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations + self.dryRun = dryRun self.encoderSettings = encoderSettings self.inputAttachments = inputAttachments self.inputSpecification = inputSpecification @@ -4520,11 +4570,13 @@ extension MediaLive { @available(*, deprecated, message: "Members reserved have been deprecated") @inlinable - public init(anywhereSettings: AnywhereSettings? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceCreateSettings? = nil, name: String? = nil, requestId: String? = CreateChannelRequest.idempotencyToken(), reserved: String? = nil, roleArn: String? 
= nil, tags: [String: String]? = nil, vpc: VpcOutputSettings? = nil) { + public init(anywhereSettings: AnywhereSettings? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionRequest? = nil, destinations: [OutputDestination]? = nil, dryRun: Bool? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceCreateSettings? = nil, name: String? = nil, requestId: String? = CreateChannelRequest.idempotencyToken(), reserved: String? = nil, roleArn: String? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettings? = nil) { self.anywhereSettings = anywhereSettings self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations + self.dryRun = dryRun self.encoderSettings = encoderSettings self.inputAttachments = inputAttachments self.inputSpecification = inputSpecification @@ -4553,7 +4605,9 @@ extension MediaLive { case anywhereSettings = "anywhereSettings" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" + case dryRun = "dryRun" case encoderSettings = "encoderSettings" case inputAttachments = "inputAttachments" case inputSpecification = "inputSpecification" @@ -5725,6 +5779,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. 
For other types (HLS, for example), there is /// one destination per packager. @@ -5757,11 +5813,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? 
= nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -5784,6 +5841,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -6549,6 +6607,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -6581,11 +6641,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? 
= nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -6608,6 +6669,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -12136,6 +12198,24 @@ extension MediaLive { } } + public struct ListVersionsRequest: AWSEncodableShape { + public init() {} + } + + public struct ListVersionsResponse: AWSDecodableShape { + /// List of engine versions that are available for this AWS account. + public let versions: [ChannelEngineVersionResponse]? + + @inlinable + public init(versions: [ChannelEngineVersionResponse]? 
= nil) { + self.versions = versions + } + + private enum CodingKeys: String, CodingKey { + case versions = "versions" + } + } + public struct M2tsSettings: AWSEncodableShape & AWSDecodableShape { /// When set to drop, output audio streams will be removed from the program if the selected input audio stream is removed from the input. This allows the output audio configuration to dynamically change based on input configuration. If this is set to encodeSilence, all output audio streams will output encoded silence when not connected to an active input stream. public let absentInputAudioBehavior: M2tsAbsentInputAudioBehavior? @@ -14160,15 +14240,18 @@ extension MediaLive { public let activeMotionGraphicsActionName: String? /// The current URI being used for HTML5 motion graphics for this pipeline. public let activeMotionGraphicsUri: String? + /// Current engine version of the encoder for this pipeline. + public let channelEngineVersion: ChannelEngineVersionResponse? /// Pipeline ID public let pipelineId: String? @inlinable - public init(activeInputAttachmentName: String? = nil, activeInputSwitchActionName: String? = nil, activeMotionGraphicsActionName: String? = nil, activeMotionGraphicsUri: String? = nil, pipelineId: String? = nil) { + public init(activeInputAttachmentName: String? = nil, activeInputSwitchActionName: String? = nil, activeMotionGraphicsActionName: String? = nil, activeMotionGraphicsUri: String? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, pipelineId: String? 
= nil) { self.activeInputAttachmentName = activeInputAttachmentName self.activeInputSwitchActionName = activeInputSwitchActionName self.activeMotionGraphicsActionName = activeMotionGraphicsActionName self.activeMotionGraphicsUri = activeMotionGraphicsUri + self.channelEngineVersion = channelEngineVersion self.pipelineId = pipelineId } @@ -14177,6 +14260,7 @@ extension MediaLive { case activeInputSwitchActionName = "activeInputSwitchActionName" case activeMotionGraphicsActionName = "activeMotionGraphicsActionName" case activeMotionGraphicsUri = "activeMotionGraphicsUri" + case channelEngineVersion = "channelEngineVersion" case pipelineId = "pipelineId" } } @@ -14544,6 +14628,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -14578,11 +14664,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, maintenanceStatus: String? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? 
= nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, maintenanceStatus: String? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -14606,6 +14693,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -15504,6 +15592,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. 
For other types (HLS, for example), there is /// one destination per packager. @@ -15536,11 +15626,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? 
= nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -15563,6 +15654,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -16269,6 +16361,8 @@ extension MediaLive { public let cdiInputSpecification: CdiInputSpecification? /// The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. public let channelClass: ChannelClass? + /// Requested engine version for this channel. + public let channelEngineVersion: ChannelEngineVersionResponse? /// A list of destinations of the channel. For UDP outputs, there is one /// destination per output. For other types (HLS, for example), there is /// one destination per packager. @@ -16301,11 +16395,12 @@ extension MediaLive { public let vpc: VpcOutputSettingsDescription? @inlinable - public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? 
= nil) { + public init(anywhereSettings: DescribeAnywhereSettings? = nil, arn: String? = nil, cdiInputSpecification: CdiInputSpecification? = nil, channelClass: ChannelClass? = nil, channelEngineVersion: ChannelEngineVersionResponse? = nil, destinations: [OutputDestination]? = nil, egressEndpoints: [ChannelEgressEndpoint]? = nil, encoderSettings: EncoderSettings? = nil, id: String? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceStatus? = nil, name: String? = nil, pipelineDetails: [PipelineDetail]? = nil, pipelinesRunningCount: Int? = nil, roleArn: String? = nil, state: ChannelState? = nil, tags: [String: String]? = nil, vpc: VpcOutputSettingsDescription? = nil) { self.anywhereSettings = anywhereSettings self.arn = arn self.cdiInputSpecification = cdiInputSpecification self.channelClass = channelClass + self.channelEngineVersion = channelEngineVersion self.destinations = destinations self.egressEndpoints = egressEndpoints self.encoderSettings = encoderSettings @@ -16328,6 +16423,7 @@ extension MediaLive { case arn = "arn" case cdiInputSpecification = "cdiInputSpecification" case channelClass = "channelClass" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" case egressEndpoints = "egressEndpoints" case encoderSettings = "encoderSettings" @@ -16925,10 +17021,13 @@ extension MediaLive { public struct UpdateChannelRequest: AWSEncodableShape { /// Specification of CDI inputs for this channel public let cdiInputSpecification: CdiInputSpecification? + /// Channel engine version for this channel + public let channelEngineVersion: ChannelEngineVersionRequest? /// channel ID public let channelId: String /// A list of output destinations for this channel. public let destinations: [OutputDestination]? + public let dryRun: Bool? /// The encoder settings for this channel. public let encoderSettings: EncoderSettings? 
public let inputAttachments: [InputAttachment]? @@ -16944,10 +17043,12 @@ extension MediaLive { public let roleArn: String? @inlinable - public init(cdiInputSpecification: CdiInputSpecification? = nil, channelId: String, destinations: [OutputDestination]? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceUpdateSettings? = nil, name: String? = nil, roleArn: String? = nil) { + public init(cdiInputSpecification: CdiInputSpecification? = nil, channelEngineVersion: ChannelEngineVersionRequest? = nil, channelId: String, destinations: [OutputDestination]? = nil, dryRun: Bool? = nil, encoderSettings: EncoderSettings? = nil, inputAttachments: [InputAttachment]? = nil, inputSpecification: InputSpecification? = nil, logLevel: LogLevel? = nil, maintenance: MaintenanceUpdateSettings? = nil, name: String? = nil, roleArn: String? = nil) { self.cdiInputSpecification = cdiInputSpecification + self.channelEngineVersion = channelEngineVersion self.channelId = channelId self.destinations = destinations + self.dryRun = dryRun self.encoderSettings = encoderSettings self.inputAttachments = inputAttachments self.inputSpecification = inputSpecification @@ -16961,8 +17062,10 @@ extension MediaLive { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) try container.encodeIfPresent(self.cdiInputSpecification, forKey: .cdiInputSpecification) + try container.encodeIfPresent(self.channelEngineVersion, forKey: .channelEngineVersion) request.encodePath(self.channelId, key: "ChannelId") try container.encodeIfPresent(self.destinations, forKey: .destinations) + try container.encodeIfPresent(self.dryRun, forKey: .dryRun) try container.encodeIfPresent(self.encoderSettings, forKey: .encoderSettings) try container.encodeIfPresent(self.inputAttachments, forKey: .inputAttachments) try container.encodeIfPresent(self.inputSpecification, forKey: .inputSpecification) @@ -16985,7 +17088,9 @@ extension MediaLive { private enum CodingKeys: String, CodingKey { case cdiInputSpecification = "cdiInputSpecification" + case channelEngineVersion = "channelEngineVersion" case destinations = "destinations" + case dryRun = "dryRun" case encoderSettings = "encoderSettings" case inputAttachments = "inputAttachments" case inputSpecification = "inputSpecification" diff --git a/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift b/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift index 105791400d..1fe6a353ab 100644 --- a/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift +++ b/Sources/Soto/Services/NetworkFirewall/NetworkFirewall_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS NetworkFirewall service. /// -/// This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. 
To access Network Firewall using the REST API endpoint: https://network-firewall..amazonaws.com Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. 
In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints. +/// This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. To view the complete list of Amazon Web Services Regions where Network Firewall is available, see Service endpoints and quotas in the Amazon Web Services General Reference. To access Network Firewall using the IPv4 REST API endpoint: https://network-firewall..amazonaws.com To access Network Firewall using the Dualstack (IPv4 and IPv6) REST API endpoint: https://network-firewall..aws.api Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). 
With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints. 
public struct NetworkFirewall: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/Organizations/Organizations_api.swift b/Sources/Soto/Services/Organizations/Organizations_api.swift index 06a98f6c3d..98f9efeeff 100644 --- a/Sources/Soto/Services/Organizations/Organizations_api.swift +++ b/Sources/Soto/Services/Organizations/Organizations_api.swift @@ -82,6 +82,7 @@ public struct Organizations: AWSService { "aws-cn-global": "organizations.cn-northwest-1.amazonaws.com.cn", "aws-global": "organizations.us-east-1.amazonaws.com", "aws-iso-b-global": "organizations.us-isob-east-1.sc2s.sgov.gov", + "aws-iso-global": "organizations.us-iso-east-1.c2s.ic.gov", "aws-us-gov-global": "organizations.us-gov-west-1.amazonaws.com" ]} @@ -89,6 +90,7 @@ public struct Organizations: AWSService { static var partitionEndpoints: [AWSPartition: (endpoint: String, region: SotoCore.Region)] {[ .aws: (endpoint: "aws-global", region: .useast1), .awscn: (endpoint: "aws-cn-global", region: .cnnorthwest1), + .awsiso: (endpoint: "aws-iso-global", region: .usisoeast1), .awsisob: (endpoint: "aws-iso-b-global", region: .usisobeast1), .awsusgov: (endpoint: "aws-us-gov-global", region: .usgovwest1) ]} diff --git a/Sources/Soto/Services/Outposts/Outposts_api.swift b/Sources/Soto/Services/Outposts/Outposts_api.swift index f952b129bd..450bb349f4 100644 --- a/Sources/Soto/Services/Outposts/Outposts_api.swift +++ b/Sources/Soto/Services/Outposts/Outposts_api.swift @@ -976,7 +976,7 @@ public struct Outposts: AWSService { return try await self.listTagsForResource(input, logger: logger) } - /// Starts the specified capacity task. You can have one active capacity task per order or Outpost. + /// Starts the specified capacity task. You can have one active capacity task for each order and each Outpost. 
@Sendable @inlinable public func startCapacityTask(_ input: StartCapacityTaskInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartCapacityTaskOutput { @@ -989,7 +989,7 @@ public struct Outposts: AWSService { logger: logger ) } - /// Starts the specified capacity task. You can have one active capacity task per order or Outpost. + /// Starts the specified capacity task. You can have one active capacity task for each order and each Outpost. /// /// Parameters: /// - dryRun: You can request a dry run to determine if the instance type and instance size changes is above or below available instance capacity. Requesting a dry run does not make any changes to your plan. @@ -1252,7 +1252,7 @@ public struct Outposts: AWSService { /// - fiberOpticCableType: The type of fiber that you will use to attach the Outpost to your network. /// - maximumSupportedWeightLbs: The maximum rack weight that this site can support. NO_LIMIT is over 2000lbs. /// - opticalStandard: The type of optical standard that you will use to attach the Outpost to your network. This field is dependent on uplink speed, fiber type, and distance to the upstream device. For more information about networking requirements for racks, see Network in the Amazon Web Services Outposts User Guide. OPTIC_10GBASE_SR: 10GBASE-SR OPTIC_10GBASE_IR: 10GBASE-IR OPTIC_10GBASE_LR: 10GBASE-LR OPTIC_40GBASE_SR: 40GBASE-SR OPTIC_40GBASE_ESR: 40GBASE-ESR OPTIC_40GBASE_IR4_LR4L: 40GBASE-IR (LR4L) OPTIC_40GBASE_LR4: 40GBASE-LR4 OPTIC_100GBASE_SR4: 100GBASE-SR4 OPTIC_100GBASE_CWDM4: 100GBASE-CWDM4 OPTIC_100GBASE_LR4: 100GBASE-LR4 OPTIC_100G_PSM4_MSA: 100G PSM4 MSA OPTIC_1000BASE_LX: 1000Base-LX OPTIC_1000BASE_SX : 1000Base-SX - /// - powerConnector: The power connector that Amazon Web Services should plan to provide for connections to the hardware. Note the correlation between PowerPhase and PowerConnector. 
Single-phase AC feed L6-30P – (common in US); 30A; single phase IEC309 (blue) – P+N+E, 6hr; 32 A; single phase Three-phase AC feed AH530P7W (red) – 3P+N+E, 7hr; 30A; three phase AH532P6W (red) – 3P+N+E, 6hr; 32A; three phase + /// - powerConnector: The power connector that Amazon Web Services should plan to provide for connections to the hardware. Note the correlation between PowerPhase and PowerConnector. Single-phase AC feed L6-30P – (common in US); 30A; single phase IEC309 (blue) – P+N+E, 6hr; 32 A; single phase Three-phase AC feed AH530P7W (red) – 3P+N+E, 7hr; 30A; three phase AH532P6W (red) – 3P+N+E, 6hr; 32A; three phase CS8365C – (common in US); 3P+E, 50A; three phase /// - powerDrawKva: The power draw, in kVA, available at the hardware placement position for the rack. /// - powerFeedDrop: Indicates whether the power feed comes above or below the rack. /// - powerPhase: The power option that you can provide for hardware. Single-phase AC feed: 200 V to 277 V, 50 Hz or 60 Hz Three-phase AC feed: 346 V to 480 V, 50 Hz or 60 Hz diff --git a/Sources/Soto/Services/Outposts/Outposts_shapes.swift b/Sources/Soto/Services/Outposts/Outposts_shapes.swift index c0256d5d8c..0e842ffddd 100644 --- a/Sources/Soto/Services/Outposts/Outposts_shapes.swift +++ b/Sources/Soto/Services/Outposts/Outposts_shapes.swift @@ -176,6 +176,7 @@ extension Outposts { public enum PowerConnector: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case ah530p7w = "AH530P7W" case ah532p6w = "AH532P6W" + case cs8365c = "CS8365C" case iec309 = "IEC309" case l630P = "L6_30P" public var description: String { return self.rawValue } @@ -2794,7 +2795,7 @@ extension Outposts { public let maximumSupportedWeightLbs: MaximumSupportedWeightLbs? /// The type of optical standard that you will use to attach the Outpost to your network. This field is dependent on uplink speed, fiber type, and distance to the upstream device. 
For more information about networking requirements for racks, see Network in the Amazon Web Services Outposts User Guide. OPTIC_10GBASE_SR: 10GBASE-SR OPTIC_10GBASE_IR: 10GBASE-IR OPTIC_10GBASE_LR: 10GBASE-LR OPTIC_40GBASE_SR: 40GBASE-SR OPTIC_40GBASE_ESR: 40GBASE-ESR OPTIC_40GBASE_IR4_LR4L: 40GBASE-IR (LR4L) OPTIC_40GBASE_LR4: 40GBASE-LR4 OPTIC_100GBASE_SR4: 100GBASE-SR4 OPTIC_100GBASE_CWDM4: 100GBASE-CWDM4 OPTIC_100GBASE_LR4: 100GBASE-LR4 OPTIC_100G_PSM4_MSA: 100G PSM4 MSA OPTIC_1000BASE_LX: 1000Base-LX OPTIC_1000BASE_SX : 1000Base-SX public let opticalStandard: OpticalStandard? - /// The power connector that Amazon Web Services should plan to provide for connections to the hardware. Note the correlation between PowerPhase and PowerConnector. Single-phase AC feed L6-30P – (common in US); 30A; single phase IEC309 (blue) – P+N+E, 6hr; 32 A; single phase Three-phase AC feed AH530P7W (red) – 3P+N+E, 7hr; 30A; three phase AH532P6W (red) – 3P+N+E, 6hr; 32A; three phase + /// The power connector that Amazon Web Services should plan to provide for connections to the hardware. Note the correlation between PowerPhase and PowerConnector. Single-phase AC feed L6-30P – (common in US); 30A; single phase IEC309 (blue) – P+N+E, 6hr; 32 A; single phase Three-phase AC feed AH530P7W (red) – 3P+N+E, 7hr; 30A; three phase AH532P6W (red) – 3P+N+E, 6hr; 32A; three phase CS8365C – (common in US); 3P+E, 50A; three phase public let powerConnector: PowerConnector? /// The power draw, in kVA, available at the hardware placement position for the rack. public let powerDrawKva: PowerDrawKva? diff --git a/Sources/Soto/Services/QConnect/QConnect_shapes.swift b/Sources/Soto/Services/QConnect/QConnect_shapes.swift index 654db50d88..e224a49cf9 100644 --- a/Sources/Soto/Services/QConnect/QConnect_shapes.swift +++ b/Sources/Soto/Services/QConnect/QConnect_shapes.swift @@ -1518,15 +1518,18 @@ extension QConnect { public let associationConfigurations: [AssociationConfiguration]? 
/// The AI Prompt identifier for the Intent Labeling prompt used by the ANSWER_RECOMMENDATION AI Agent. public let intentLabelingGenerationAIPromptId: String? + /// The locale that specifies the language and region settings that determine the response language for QueryAssistant. Changing this locale to anything other than en_US will turn off recommendations triggered by contact transcripts for agent assistance, as this feature is not supported in multiple languages. + public let locale: String? /// The AI Prompt identifier for the Query Reformulation prompt used by the ANSWER_RECOMMENDATION AI Agent. public let queryReformulationAIPromptId: String? @inlinable - public init(answerGenerationAIGuardrailId: String? = nil, answerGenerationAIPromptId: String? = nil, associationConfigurations: [AssociationConfiguration]? = nil, intentLabelingGenerationAIPromptId: String? = nil, queryReformulationAIPromptId: String? = nil) { + public init(answerGenerationAIGuardrailId: String? = nil, answerGenerationAIPromptId: String? = nil, associationConfigurations: [AssociationConfiguration]? = nil, intentLabelingGenerationAIPromptId: String? = nil, locale: String? = nil, queryReformulationAIPromptId: String?
= nil) { self.answerGenerationAIGuardrailId = answerGenerationAIGuardrailId self.answerGenerationAIPromptId = answerGenerationAIPromptId self.associationConfigurations = associationConfigurations self.intentLabelingGenerationAIPromptId = intentLabelingGenerationAIPromptId + self.locale = locale self.queryReformulationAIPromptId = queryReformulationAIPromptId } @@ -1537,6 +1540,8 @@ extension QConnect { try $0.validate(name: "\(name).associationConfigurations[]") } try self.validate(self.intentLabelingGenerationAIPromptId, name: "intentLabelingGenerationAIPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.locale, name: "locale", parent: name, max: 4096) + try self.validate(self.locale, name: "locale", parent: name, min: 1) try self.validate(self.queryReformulationAIPromptId, name: "queryReformulationAIPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$") } @@ -1545,6 +1550,7 @@ extension QConnect { case answerGenerationAIPromptId = "answerGenerationAIPromptId" case associationConfigurations = "associationConfigurations" case intentLabelingGenerationAIPromptId = "intentLabelingGenerationAIPromptId" + case locale = "locale" case queryReformulationAIPromptId = "queryReformulationAIPromptId" } } @@ -5508,7 +5514,7 @@ extension QConnect { public struct GuardrailPiiEntityConfig: AWSEncodableShape & AWSDecodableShape { /// Configure AI Guardrail's action when the PII entity is detected. public let action: GuardrailSensitiveInformationAction - /// Configure AI Guardrail type when the PII entity is detected. The following PIIs are used to block or mask sensitive information: General ADDRESS A physical address, such as "100 Main Street, Anytown, USA" or "Suite #12, Building 123". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood. 
AGE An individual's age, including the quantity and unit of time. For example, in the phrase "I am 40 years old," Guarrails recognizes "40 years" as an age. NAME An individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. AI Guardrail doesn't apply this entity type to names that are part of organizations or addresses. For example, AI Guardrail recognizes the "John Doe Organization" as an organization, and it recognizes "Jane Doe Street" as an address. EMAIL An email address, such as marymajor@email.com. PHONE A phone number. This entity type also includes fax and pager numbers. USERNAME A user name that identifies an account, such as a login name, screen name, nick name, or handle. PASSWORD An alphanumeric string that is used as a password, such as "* very20special#pass*". DRIVER_ID The number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters. LICENSE_PLATE A license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country. VEHICLE_IDENTIFICATION_NUMBER A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the ISO 3779 specification. Each country has specific codes and formats for VINs. Finance REDIT_DEBIT_CARD_CVV A three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code. CREDIT_DEBIT_CARD_EXPIRY The expiration date for a credit or debit card. This number is usually four digits long and is often formatted as month/year or MM/YY. 
AI Guardrail recognizes expiration dates such as 01/21, 01/2021, and Jan 2021. CREDIT_DEBIT_CARD_NUMBER The number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present. PIN A four-digit personal identification number (PIN) with which you can access your bank account. INTERNATIONAL_BANK_ACCOUNT_NUMBER An International Bank Account Number has specific formats in each country. For more information, see www.iban.com/structure. SWIFT_CODE A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers. SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office. IT IP_ADDRESS An IPv4 address, such as 198.51.100.0. MAC_ADDRESS A media access control (MAC) address is a unique identifier assigned to a network interface controller (NIC). URL A web address, such as www.example.com. AWS_ACCESS_KEY A unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. AWS_SECRET_KEY A unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. USA specific US_BANK_ACCOUNT_NUMBER A US bank account number, which is typically 10 to 12 digits long. US_BANK_ROUTING_NUMBER A US bank account routing number. These are typically nine digits long, US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a "9" and contain a "7" or "8" as the fourth digit. 
An ITIN can be formatted with a space or a dash after the third and forth digits. US_PASSPORT_NUMBER A US passport number. Passport numbers range from six to nine alphanumeric characters. US_SOCIAL_SECURITY_NUMBER A US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents. Canada specific CA_HEALTH_NUMBER A Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits. CA_SOCIAL_INSURANCE_NUMBER A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits. The SIN is formatted as three groups of three digits, such as 123-456-789. A SIN can be validated through a simple check-digit process called the Luhn algorithm . UK Specific UK_NATIONAL_HEALTH_SERVICE_NUMBER A UK National Health Service Number is a 10-17 digit number, such as 485 555 3456. The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum. UK_NATIONAL_INSURANCE_NUMBER A UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system. The number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, forth, and sixth digits. UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business. Custom Regex filter - You can use a regular expressions to define patterns for an AI Guardrail to recognize and act upon such as serial number, booking ID etc.. + /// Configure AI Guardrail type when the PII entity is detected. 
The following PIIs are used to block or mask sensitive information: General ADDRESS A physical address, such as "100 Main Street, Anytown, USA" or "Suite #12, Building 123". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood. AGE An individual's age, including the quantity and unit of time. For example, in the phrase "I am 40 years old," Guardrails recognizes "40 years" as an age. NAME An individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. AI Guardrail doesn't apply this entity type to names that are part of organizations or addresses. For example, AI Guardrail recognizes the "John Doe Organization" as an organization, and it recognizes "Jane Doe Street" as an address. EMAIL An email address, such as marymajor@email.com. PHONE A phone number. This entity type also includes fax and pager numbers. USERNAME A user name that identifies an account, such as a login name, screen name, nick name, or handle. PASSWORD An alphanumeric string that is used as a password, such as "* very20special#pass*". DRIVER_ID The number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters. LICENSE_PLATE A license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country. VEHICLE_IDENTIFICATION_NUMBER A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the ISO 3779 specification. Each country has specific codes and formats for VINs. 
Finance CREDIT_DEBIT_CARD_CVV A three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code. CREDIT_DEBIT_CARD_EXPIRY The expiration date for a credit or debit card. This number is usually four digits long and is often formatted as month/year or MM/YY. AI Guardrail recognizes expiration dates such as 01/21, 01/2021, and Jan 2021. CREDIT_DEBIT_CARD_NUMBER The number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present. PIN A four-digit personal identification number (PIN) with which you can access your bank account. INTERNATIONAL_BANK_ACCOUNT_NUMBER An International Bank Account Number has specific formats in each country. For more information, see www.iban.com/structure. SWIFT_CODE A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers. SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office. IT IP_ADDRESS An IPv4 address, such as 198.51.100.0. MAC_ADDRESS A media access control (MAC) address is a unique identifier assigned to a network interface controller (NIC). URL A web address, such as www.example.com. AWS_ACCESS_KEY A unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. AWS_SECRET_KEY A unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically. 
USA specific US_BANK_ACCOUNT_NUMBER A US bank account number, which is typically 10 to 12 digits long. US_BANK_ROUTING_NUMBER A US bank account routing number. These are typically nine digits long, US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a "9" and contain a "7" or "8" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and forth digits. US_PASSPORT_NUMBER A US passport number. Passport numbers range from six to nine alphanumeric characters. US_SOCIAL_SECURITY_NUMBER A US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents. Canada specific CA_HEALTH_NUMBER A Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits. CA_SOCIAL_INSURANCE_NUMBER A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits. The SIN is formatted as three groups of three digits, such as 123-456-789. A SIN can be validated through a simple check-digit process called the Luhn algorithm . UK Specific UK_NATIONAL_HEALTH_SERVICE_NUMBER A UK National Health Service Number is a 10-17 digit number, such as 485 555 3456. The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum. UK_NATIONAL_INSURANCE_NUMBER A UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system. The number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, forth, and sixth digits. 
UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business. Custom Regex filter - You can use regular expressions to define patterns for an AI Guardrail to recognize and act upon such as serial number, booking ID etc.. public let type: GuardrailPiiEntityType @inlinable @@ -6902,12 +6908,15 @@ extension QConnect { public let answerGenerationAIPromptId: String? /// The association configurations for overriding behavior on this AI Agent. public let associationConfigurations: [AssociationConfiguration]? + /// The locale that specifies the language and region settings that determine the response language for QueryAssistant. + public let locale: String? @inlinable - public init(answerGenerationAIGuardrailId: String? = nil, answerGenerationAIPromptId: String? = nil, associationConfigurations: [AssociationConfiguration]? = nil) { + public init(answerGenerationAIGuardrailId: String? = nil, answerGenerationAIPromptId: String? = nil, associationConfigurations: [AssociationConfiguration]? = nil, locale: String?
= nil) { self.answerGenerationAIGuardrailId = answerGenerationAIGuardrailId self.answerGenerationAIPromptId = answerGenerationAIPromptId self.associationConfigurations = associationConfigurations + self.locale = locale } public func validate(name: String) throws { @@ -6916,12 +6925,15 @@ extension QConnect { try self.associationConfigurations?.forEach { try $0.validate(name: "\(name).associationConfigurations[]") } + try self.validate(self.locale, name: "locale", parent: name, max: 4096) + try self.validate(self.locale, name: "locale", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { case answerGenerationAIGuardrailId = "answerGenerationAIGuardrailId" case answerGenerationAIPromptId = "answerGenerationAIPromptId" case associationConfigurations = "associationConfigurations" + case locale = "locale" } } diff --git a/Sources/Soto/Services/QuickSight/QuickSight_api.swift b/Sources/Soto/Services/QuickSight/QuickSight_api.swift index 77736a6292..f690608b3e 100644 --- a/Sources/Soto/Services/QuickSight/QuickSight_api.swift +++ b/Sources/Soto/Services/QuickSight/QuickSight_api.swift @@ -538,6 +538,7 @@ public struct QuickSight: AWSService { /// - importMode: Indicates whether you want to import the data into SPICE. /// - logicalTableMap: Configures the combination and transformation of the data from the physical tables. /// - name: The display name for the dataset. + /// - performanceConfiguration: The configuration for the performance optimization of the dataset that contains a UniqueKey configuration. /// - permissions: A list of resource permissions on the dataset. /// - physicalTableMap: Declares the physical tables that are available in the underlying data sources. /// - rowLevelPermissionDataSet: The row-level security configuration for the data that you want to create. @@ -557,6 +558,7 @@ public struct QuickSight: AWSService { importMode: DataSetImportMode, logicalTableMap: [String: LogicalTable]? 
= nil, name: String, + performanceConfiguration: PerformanceConfiguration? = nil, permissions: [ResourcePermission]? = nil, physicalTableMap: [String: PhysicalTable], rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, @@ -576,6 +578,7 @@ public struct QuickSight: AWSService { importMode: importMode, logicalTableMap: logicalTableMap, name: name, + performanceConfiguration: performanceConfiguration, permissions: permissions, physicalTableMap: physicalTableMap, rowLevelPermissionDataSet: rowLevelPermissionDataSet, @@ -6872,6 +6875,7 @@ public struct QuickSight: AWSService { /// - importMode: Indicates whether you want to import the data into SPICE. /// - logicalTableMap: Configures the combination and transformation of the data from the physical tables. /// - name: The display name for the dataset. + /// - performanceConfiguration: The configuration for the performance optimization of the dataset that contains a UniqueKey configuration. /// - physicalTableMap: Declares the physical tables that are available in the underlying data sources. /// - rowLevelPermissionDataSet: The row-level security configuration for the data you want to create. /// - rowLevelPermissionTagConfiguration: The configuration of tags on a dataset to set row-level security. Row-level security tags are currently supported for anonymous embedding only. @@ -6888,6 +6892,7 @@ public struct QuickSight: AWSService { importMode: DataSetImportMode, logicalTableMap: [String: LogicalTable]? = nil, name: String, + performanceConfiguration: PerformanceConfiguration? = nil, physicalTableMap: [String: PhysicalTable], rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? 
= nil, @@ -6904,6 +6909,7 @@ public struct QuickSight: AWSService { importMode: importMode, logicalTableMap: logicalTableMap, name: name, + performanceConfiguration: performanceConfiguration, physicalTableMap: physicalTableMap, rowLevelPermissionDataSet: rowLevelPermissionDataSet, rowLevelPermissionTagConfiguration: rowLevelPermissionTagConfiguration diff --git a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift index 5887e17f88..6b3cb2c3cc 100644 --- a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift +++ b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift @@ -8636,6 +8636,8 @@ extension QuickSight { public let logicalTableMap: [String: LogicalTable]? /// The display name for the dataset. public let name: String + /// The configuration for the performance optimization of the dataset that contains a UniqueKey configuration. + public let performanceConfiguration: PerformanceConfiguration? /// A list of resource permissions on the dataset. public let permissions: [ResourcePermission]? /// Declares the physical tables that are available in the underlying data sources. @@ -8648,7 +8650,7 @@ extension QuickSight { public let tags: [Tag]? @inlinable - public init(awsAccountId: String, columnGroups: [ColumnGroup]? = nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, dataSetId: String, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, folderArns: [String]? = nil, importMode: DataSetImportMode, logicalTableMap: [String: LogicalTable]? = nil, name: String, permissions: [ResourcePermission]? = nil, physicalTableMap: [String: PhysicalTable], rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil, tags: [Tag]? = nil) { + public init(awsAccountId: String, columnGroups: [ColumnGroup]? 
= nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, dataSetId: String, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, folderArns: [String]? = nil, importMode: DataSetImportMode, logicalTableMap: [String: LogicalTable]? = nil, name: String, performanceConfiguration: PerformanceConfiguration? = nil, permissions: [ResourcePermission]? = nil, physicalTableMap: [String: PhysicalTable], rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil, tags: [Tag]? = nil) { self.awsAccountId = awsAccountId self.columnGroups = columnGroups self.columnLevelPermissionRules = columnLevelPermissionRules @@ -8660,6 +8662,7 @@ extension QuickSight { self.importMode = importMode self.logicalTableMap = logicalTableMap self.name = name + self.performanceConfiguration = performanceConfiguration self.permissions = permissions self.physicalTableMap = physicalTableMap self.rowLevelPermissionDataSet = rowLevelPermissionDataSet @@ -8681,6 +8684,7 @@ extension QuickSight { try container.encode(self.importMode, forKey: .importMode) try container.encodeIfPresent(self.logicalTableMap, forKey: .logicalTableMap) try container.encode(self.name, forKey: .name) + try container.encodeIfPresent(self.performanceConfiguration, forKey: .performanceConfiguration) try container.encodeIfPresent(self.permissions, forKey: .permissions) try container.encode(self.physicalTableMap, forKey: .physicalTableMap) try container.encodeIfPresent(self.rowLevelPermissionDataSet, forKey: .rowLevelPermissionDataSet) @@ -8722,6 +8726,7 @@ extension QuickSight { try self.validate(self.logicalTableMap, name: "logicalTableMap", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, max: 128) try self.validate(self.name, name: "name", parent: name, min: 1) + try self.performanceConfiguration?.validate(name: 
"\(name).performanceConfiguration") try self.permissions?.forEach { try $0.validate(name: "\(name).permissions[]") } @@ -8754,6 +8759,7 @@ extension QuickSight { case importMode = "ImportMode" case logicalTableMap = "LogicalTableMap" case name = "Name" + case performanceConfiguration = "PerformanceConfiguration" case permissions = "Permissions" case physicalTableMap = "PhysicalTableMap" case rowLevelPermissionDataSet = "RowLevelPermissionDataSet" @@ -11878,6 +11884,8 @@ extension QuickSight { public let name: String? /// The list of columns after all transforms. These columns are available in templates, analyses, and dashboards. public let outputColumns: [OutputColumn]? + /// The performance optimization configuration of a dataset. + public let performanceConfiguration: PerformanceConfiguration? /// Declares the physical tables that are available in the underlying data sources. public let physicalTableMap: [String: PhysicalTable]? /// The row-level security configuration for the dataset. @@ -11886,7 +11894,7 @@ extension QuickSight { public let rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? @inlinable - public init(arn: String? = nil, columnGroups: [ColumnGroup]? = nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, consumedSpiceCapacityInBytes: Int64? = nil, createdTime: Date? = nil, dataSetId: String? = nil, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, importMode: DataSetImportMode? = nil, lastUpdatedTime: Date? = nil, logicalTableMap: [String: LogicalTable]? = nil, name: String? = nil, outputColumns: [OutputColumn]? = nil, physicalTableMap: [String: PhysicalTable]? = nil, rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil) { + public init(arn: String? = nil, columnGroups: [ColumnGroup]? 
= nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, consumedSpiceCapacityInBytes: Int64? = nil, createdTime: Date? = nil, dataSetId: String? = nil, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, importMode: DataSetImportMode? = nil, lastUpdatedTime: Date? = nil, logicalTableMap: [String: LogicalTable]? = nil, name: String? = nil, outputColumns: [OutputColumn]? = nil, performanceConfiguration: PerformanceConfiguration? = nil, physicalTableMap: [String: PhysicalTable]? = nil, rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil) { self.arn = arn self.columnGroups = columnGroups self.columnLevelPermissionRules = columnLevelPermissionRules @@ -11901,6 +11909,7 @@ extension QuickSight { self.logicalTableMap = logicalTableMap self.name = name self.outputColumns = outputColumns + self.performanceConfiguration = performanceConfiguration self.physicalTableMap = physicalTableMap self.rowLevelPermissionDataSet = rowLevelPermissionDataSet self.rowLevelPermissionTagConfiguration = rowLevelPermissionTagConfiguration @@ -11921,6 +11930,7 @@ extension QuickSight { case logicalTableMap = "LogicalTableMap" case name = "Name" case outputColumns = "OutputColumns" + case performanceConfiguration = "PerformanceConfiguration" case physicalTableMap = "PhysicalTableMap" case rowLevelPermissionDataSet = "RowLevelPermissionDataSet" case rowLevelPermissionTagConfiguration = "RowLevelPermissionTagConfiguration" @@ -29876,6 +29886,28 @@ extension QuickSight { } } + public struct PerformanceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// A UniqueKey configuration. + public let uniqueKeys: [UniqueKey]? + + @inlinable + public init(uniqueKeys: [UniqueKey]? 
= nil) { + self.uniqueKeys = uniqueKeys + } + + public func validate(name: String) throws { + try self.uniqueKeys?.forEach { + try $0.validate(name: "\(name).uniqueKeys[]") + } + try self.validate(self.uniqueKeys, name: "uniqueKeys", parent: name, max: 1) + try self.validate(self.uniqueKeys, name: "uniqueKeys", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case uniqueKeys = "UniqueKeys" + } + } + public struct PeriodOverPeriodComputation: AWSEncodableShape & AWSDecodableShape { /// The ID for a computation. public let computationId: String @@ -39909,6 +39941,29 @@ extension QuickSight { } } + public struct UniqueKey: AWSEncodableShape & AWSDecodableShape { + /// The name of the column that is referenced in the UniqueKey configuration. + public let columnNames: [String] + + @inlinable + public init(columnNames: [String]) { + self.columnNames = columnNames + } + + public func validate(name: String) throws { + try self.columnNames.forEach { + try validate($0, name: "columnNames[]", parent: name, max: 128) + try validate($0, name: "columnNames[]", parent: name, min: 1) + } + try self.validate(self.columnNames, name: "columnNames", parent: name, max: 1) + try self.validate(self.columnNames, name: "columnNames", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case columnNames = "ColumnNames" + } + } + public struct UniqueValuesComputation: AWSEncodableShape & AWSDecodableShape { /// The category field that is used in a computation. public let category: DimensionField? @@ -41173,6 +41228,8 @@ extension QuickSight { public let logicalTableMap: [String: LogicalTable]? /// The display name for the dataset. public let name: String + /// The configuration for the performance optimization of the dataset that contains a UniqueKey configuration. + public let performanceConfiguration: PerformanceConfiguration? /// Declares the physical tables that are available in the underlying data sources. 
public let physicalTableMap: [String: PhysicalTable] /// The row-level security configuration for the data you want to create. @@ -41181,7 +41238,7 @@ extension QuickSight { public let rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? @inlinable - public init(awsAccountId: String, columnGroups: [ColumnGroup]? = nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, dataSetId: String, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, importMode: DataSetImportMode, logicalTableMap: [String: LogicalTable]? = nil, name: String, physicalTableMap: [String: PhysicalTable], rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? = nil) { + public init(awsAccountId: String, columnGroups: [ColumnGroup]? = nil, columnLevelPermissionRules: [ColumnLevelPermissionRule]? = nil, dataSetId: String, datasetParameters: [DatasetParameter]? = nil, dataSetUsageConfiguration: DataSetUsageConfiguration? = nil, fieldFolders: [String: FieldFolder]? = nil, importMode: DataSetImportMode, logicalTableMap: [String: LogicalTable]? = nil, name: String, performanceConfiguration: PerformanceConfiguration? = nil, physicalTableMap: [String: PhysicalTable], rowLevelPermissionDataSet: RowLevelPermissionDataSet? = nil, rowLevelPermissionTagConfiguration: RowLevelPermissionTagConfiguration? 
= nil) { self.awsAccountId = awsAccountId self.columnGroups = columnGroups self.columnLevelPermissionRules = columnLevelPermissionRules @@ -41192,6 +41249,7 @@ extension QuickSight { self.importMode = importMode self.logicalTableMap = logicalTableMap self.name = name + self.performanceConfiguration = performanceConfiguration self.physicalTableMap = physicalTableMap self.rowLevelPermissionDataSet = rowLevelPermissionDataSet self.rowLevelPermissionTagConfiguration = rowLevelPermissionTagConfiguration @@ -41210,6 +41268,7 @@ extension QuickSight { try container.encode(self.importMode, forKey: .importMode) try container.encodeIfPresent(self.logicalTableMap, forKey: .logicalTableMap) try container.encode(self.name, forKey: .name) + try container.encodeIfPresent(self.performanceConfiguration, forKey: .performanceConfiguration) try container.encode(self.physicalTableMap, forKey: .physicalTableMap) try container.encodeIfPresent(self.rowLevelPermissionDataSet, forKey: .rowLevelPermissionDataSet) try container.encodeIfPresent(self.rowLevelPermissionTagConfiguration, forKey: .rowLevelPermissionTagConfiguration) @@ -41248,6 +41307,7 @@ extension QuickSight { try self.validate(self.logicalTableMap, name: "logicalTableMap", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, max: 128) try self.validate(self.name, name: "name", parent: name, min: 1) + try self.performanceConfiguration?.validate(name: "\(name).performanceConfiguration") try self.physicalTableMap.forEach { try validate($0.key, name: "physicalTableMap.key", parent: name, max: 64) try validate($0.key, name: "physicalTableMap.key", parent: name, min: 1) @@ -41268,6 +41328,7 @@ extension QuickSight { case importMode = "ImportMode" case logicalTableMap = "LogicalTableMap" case name = "Name" + case performanceConfiguration = "PerformanceConfiguration" case physicalTableMap = "PhysicalTableMap" case rowLevelPermissionDataSet = "RowLevelPermissionDataSet" case rowLevelPermissionTagConfiguration = 
"RowLevelPermissionTagConfiguration" diff --git a/Sources/Soto/Services/RDS/RDS_api.swift b/Sources/Soto/Services/RDS/RDS_api.swift index b8b9a73abc..dfa395bf1a 100644 --- a/Sources/Soto/Services/RDS/RDS_api.swift +++ b/Sources/Soto/Services/RDS/RDS_api.swift @@ -723,7 +723,7 @@ public struct RDS: AWSService { /// /// Parameters: /// - allocatedStorage: The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters only This setting is required to create a Multi-AZ DB cluster. - /// - autoMinorVersionUpgrade: Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Multi-AZ DB clusters only + /// - autoMinorVersionUpgrade: Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB cluster /// - availabilityZones: A list of Availability Zones (AZs) where you specifically want to create DB instances in the DB cluster. For information on AZs, see Availability Zones in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only Constraints: Can't specify more than three AZs. /// - backtrackWindow: The target backtrack window, in seconds. To disable backtracking, set this value to 0. Valid for Cluster Type: Aurora MySQL DB clusters only Default: 0 Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 hours). /// - backupRetentionPeriod: The number of days for which automated backups are retained. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Default: 1 Constraints: Must be a value from 1 to 35. 
@@ -731,7 +731,7 @@ public struct RDS: AWSService { /// - characterSetName: The name of the character set (CharacterSet) to associate the DB cluster with. Valid for Cluster Type: Aurora DB clusters only /// - clusterScalabilityType: Specifies the scalability mode of the Aurora DB cluster. When set to limitless, the cluster operates as an Aurora Limitless Database. When set to standard (the default), the cluster uses normal DB instance creation. Valid for: Aurora DB clusters only You can't modify this setting after you create the DB cluster. /// - copyTagsToSnapshot: Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the cluster. + /// - databaseInsightsMode: The mode of Database Insights to enable for the DB cluster. If you set this value to advanced, you must also set the PerformanceInsightsEnabled parameter to true and the PerformanceInsightsRetentionPeriod parameter to 465. Valid for Cluster Type: Aurora DB clusters only /// - databaseName: The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional database with this name is created. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters /// - dbClusterIdentifier: The identifier for this DB cluster. This parameter is stored as a lowercase string. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 1 to 63 (for Aurora DB clusters) or 1 to 52 (for Multi-AZ DB clusters) letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. 
Example: my-cluster1 /// - dbClusterInstanceClass: The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see DB instance class in the Amazon RDS User Guide. This setting is required to create a Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters only @@ -747,7 +747,7 @@ public struct RDS: AWSService { /// - enableIAMDatabaseAuthentication: Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide or IAM database authentication for MariaDB, MySQL, and PostgreSQL in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters /// - enableLimitlessDatabase: Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only This setting is no longer used. Instead use the ClusterScalabilityType setting. /// - enableLocalWriteForwarding: Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances. Valid for: Aurora DB clusters only - /// - enablePerformanceInsights: Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters only + /// - enablePerformanceInsights: Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. 
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters /// - engine: The database engine to use for this DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: aurora-mysql aurora-postgresql mysql postgres neptune - For information about using Amazon Neptune, see the Amazon Neptune User Guide . /// - engineLifecycleSupport: The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support /// - engineMode: The DB engine mode of the DB cluster, either provisioned or serverless. The serverless engine mode only applies for Aurora Serverless v1 DB clusters. Aurora Serverless v2 DB clusters use the provisioned engine mode. 
For information about limitations and requirements for Serverless DB clusters, see the following sections in the Amazon Aurora User Guide: Limitations of Aurora Serverless v1 Requirements for Aurora Serverless v2 Valid for Cluster Type: Aurora DB clusters only @@ -759,12 +759,12 @@ public struct RDS: AWSService { /// - masterUsername: The name of the master user for the DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must be 1 to 16 letters or numbers. First character must be a letter. Can't be a reserved word for the chosen database engine. /// - masterUserPassword: The password for the master database user. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 8 to 41 characters. Can contain any printable ASCII character except "/", """, or "@". Can't be specified if ManageMasterUserPassword is turned on. /// - masterUserSecretKmsKeyId: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. 
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - /// - monitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0 - /// - monitoringRoleArn: The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid for Cluster Type: Multi-AZ DB clusters only + /// - monitoringInterval: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0 + /// - monitoringRoleArn: The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters /// - networkType: The network type of the DB cluster. The network type is determined by the DBSubnetGroup specified for the DB cluster. 
A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only Valid Values: IPV4 | DUAL /// - optionGroupName: The option group to associate the DB cluster with. DB clusters are associated with a default option group that can't be modified. - /// - performanceInsightsKMSKeyId: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only - /// - performanceInsightsRetentionPeriod: The number of days to retain Performance Insights data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. + /// - performanceInsightsKMSKeyId: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. 
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters + /// - performanceInsightsRetentionPeriod: The number of days to retain Performance Insights data. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. /// - port: The port number on which the instances in the DB cluster accept connections. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 1150-65535 Default: RDS for MySQL and Aurora MySQL - 3306 RDS for PostgreSQL and Aurora PostgreSQL - 5432 /// - preferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the Amazon Aurora User Guide. Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes. /// - preferredMaintenanceWindow: The weekly time range during which system maintenance can occur. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide. Constraints: Must be in the format ddd:hh24:mi-ddd:hh24:mi. Days must be one of Mon | Tue | Wed | Thu | Fri | Sat | Sun. 
Must be in Universal Coordinated Time (UTC). Must be at least 30 minutes. @@ -1042,7 +1042,7 @@ public struct RDS: AWSService { /// - characterSetName: For supported engines, the character set (CharacterSet) to associate the DB instance with. This setting doesn't apply to the following DB instances: Amazon Aurora - The character set is managed by the DB cluster. For more information, see CreateDBCluster. RDS Custom - However, if you need to change the character set, you can change it on the database itself. /// - copyTagsToSnapshot: Specifies whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. /// - customIamInstanceProfile: The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. This setting is required for RDS Custom. Constraints: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. - /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the instance. + /// - databaseInsightsMode: The mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. Currently, this value is inherited from the DB cluster and can't be changed. /// - dbClusterIdentifier: The identifier of the DB cluster that this DB instance will belong to. This setting doesn't apply to RDS Custom DB instances. /// - dbInstanceClass: The compute and memory capacity of the DB instance, for example db.m5.large. 
Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide. /// - dbInstanceIdentifier: The identifier for this DB instance. This parameter is stored as a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: mydbinstance @@ -1251,13 +1251,13 @@ public struct RDS: AWSService { /// Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running Db2, MariaDB, MySQL, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide. Amazon Aurora doesn't support this operation. To create a DB instance for an Aurora DB cluster, use the CreateDBInstance operation. All read replica DB instances are created with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified. Your source DB instance or cluster must have backup retention enabled. /// /// Parameters: - /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the read replica. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your read replica so that the create operation can succeed. You can also allocate additional storage for future growth. + /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the read replica. 
Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your read replica so that the create operation can succeed. You can also allocate additional storage for future growth. /// - autoMinorVersionUpgrade: Specifies whether to automatically apply minor engine upgrades to the read replica during the maintenance window. This setting doesn't apply to RDS Custom DB instances. Default: Inherits the value from the source DB instance. /// - availabilityZone: The Availability Zone (AZ) where the read replica will be created. Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region. Example: us-east-1d /// - caCertificateIdentifier: The CA certificate identifier to use for the read replica's server certificate. This setting doesn't apply to RDS Custom DB instances. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide. /// - copyTagsToSnapshot: Specifies whether to copy all tags from the read replica to snapshots of the read replica. By default, tags aren't copied. /// - customIamInstanceProfile: The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. This setting is required for RDS Custom DB instances. - /// - databaseInsightsMode: Specifies the mode of Database Insights. + /// - databaseInsightsMode: The mode of Database Insights to enable for the read replica. 
Currently, this setting is not supported. /// - dbInstanceClass: The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Default: Inherits the value from the source DB instance. /// - dbInstanceIdentifier: The DB instance identifier of the read replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string. /// - dbParameterGroupName: The name of the DB parameter group to associate with this read replica DB instance. For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica. For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the default DBParameterGroup. Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. @@ -2893,10 +2893,10 @@ public struct RDS: AWSService { /// /// Parameters: /// - dbClusterParameterGroupName: The name of a specific DB cluster parameter group to return parameter details for. Constraints: If supplied, must match the name of an existing DBClusterParameterGroup. - /// - filters: This parameter isn't currently supported. 
+ /// - filters: A filter that specifies one or more DB cluster parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB cluster parameters with these names. /// - marker: An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. - /// - source: A specific source to return parameters for. Valid Values: customer engine service + /// - source: A specific source to return parameters for. Valid Values: engine-default system user /// - logger: Logger use during operation @inlinable public func describeDBClusterParameters( @@ -3280,7 +3280,7 @@ public struct RDS: AWSService { /// /// Parameters: /// - dbParameterGroupName: The name of a specific DB parameter group to return details for. Constraints: If supplied, must match the name of an existing DBParameterGroup. - /// - filters: This parameter isn't currently supported. + /// - filters: A filter that specifies one or more DB parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB parameters with these names. /// - marker: An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. /// - maxRecords: The maximum number of records to include in the response. 
If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. /// - source: The parameter types to return. Default: All parameter types returned Valid Values: user | system | engine-default @@ -3807,7 +3807,7 @@ public struct RDS: AWSService { /// /// Parameters: /// - dbParameterGroupFamily: The name of the DB parameter group family. Valid Values: aurora-mysql5.7 aurora-mysql8.0 aurora-postgresql10 aurora-postgresql11 aurora-postgresql12 aurora-postgresql13 aurora-postgresql14 custom-oracle-ee-19 custom-oracle-ee-cdb-19 db2-ae db2-se mariadb10.2 mariadb10.3 mariadb10.4 mariadb10.5 mariadb10.6 mysql5.7 mysql8.0 oracle-ee-19 oracle-ee-cdb-19 oracle-ee-cdb-21 oracle-se2-19 oracle-se2-cdb-19 oracle-se2-cdb-21 postgres10 postgres11 postgres12 postgres13 postgres14 sqlserver-ee-11.0 sqlserver-ee-12.0 sqlserver-ee-13.0 sqlserver-ee-14.0 sqlserver-ee-15.0 sqlserver-ex-11.0 sqlserver-ex-12.0 sqlserver-ex-13.0 sqlserver-ex-14.0 sqlserver-ex-15.0 sqlserver-se-11.0 sqlserver-se-12.0 sqlserver-se-13.0 sqlserver-se-14.0 sqlserver-se-15.0 sqlserver-web-11.0 sqlserver-web-12.0 sqlserver-web-13.0 sqlserver-web-14.0 sqlserver-web-15.0 - /// - filters: This parameter isn't currently supported. + /// - filters: A filter that specifies one or more parameters to describe. The only supported filter is parameter-name. The results list only includes information about the parameters with these names. /// - marker: An optional pagination token provided by a previous DescribeEngineDefaultParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. /// - maxRecords: The maximum number of records to include in the response. 
If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. /// - logger: Logger use during operation @@ -4825,14 +4825,14 @@ public struct RDS: AWSService { /// - allowEngineModeChange: Specifies whether engine mode changes from serverless to provisioned are allowed. Valid for Cluster Type: Aurora Serverless v1 DB clusters only Constraints: You must allow engine mode changes when specifying a different value for the EngineMode parameter from the DB cluster's current engine mode. /// - allowMajorVersionUpgrade: Specifies whether major version upgrades are allowed. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version. /// - applyImmediately: Specifies whether the modifications in this request are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is disabled, changes to the DB cluster are applied during the next maintenance window. Most modifications can be applied immediately or during the next scheduled maintenance window. Some modifications, such as turning on deletion protection and changing the master password, are applied immediately—regardless of when you choose to apply them. By default, this parameter is disabled. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - /// - autoMinorVersionUpgrade: Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. 
Valid for Cluster Type: Multi-AZ DB clusters only + /// - autoMinorVersionUpgrade: Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters /// - awsBackupRecoveryPointArn: The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup. /// - backtrackWindow: The target backtrack window, in seconds. To disable backtracking, set this value to 0. Valid for Cluster Type: Aurora MySQL DB clusters only Default: 0 Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 hours). /// - backupRetentionPeriod: The number of days for which automated backups are retained. Specify a minimum value of 1. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Default: 1 Constraints: Must be a value from 1 to 35. /// - caCertificateIdentifier: The CA certificate identifier to use for the DB cluster's server certificate. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters /// - cloudwatchLogsExportConfiguration: The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The following values are valid for each DB engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - postgresql | upgrade For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. 
/// - copyTagsToSnapshot: Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the cluster. + /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the DB cluster. If you change the value from standard to advanced, you must set the PerformanceInsightsEnabled parameter to true and the PerformanceInsightsRetentionPeriod parameter to 465. If you change the value from advanced to standard, you must set the PerformanceInsightsEnabled parameter to false. Valid for Cluster Type: Aurora DB clusters only /// - dbClusterIdentifier: The DB cluster identifier for the cluster being modified. This parameter isn't case-sensitive. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must match the identifier of an existing DB cluster. /// - dbClusterInstanceClass: The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters only /// - dbClusterParameterGroupName: The name of the DB cluster parameter group to use for the DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters @@ -4857,8 +4857,8 @@ public struct RDS: AWSService { /// - networkType: The network type of the DB cluster. The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. 
Valid for Cluster Type: Aurora DB clusters only Valid Values: IPV4 | DUAL /// - newDBClusterIdentifier: The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. The first character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: my-cluster2 /// - optionGroupName: The option group to associate the DB cluster with. DB clusters are associated with a default option group that can't be modified. - /// - performanceInsightsKMSKeyId: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only - /// - performanceInsightsRetentionPeriod: The number of days to retain Performance Insights data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. + /// - performanceInsightsKMSKeyId: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. 
There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters + /// - performanceInsightsRetentionPeriod: The number of days to retain Performance Insights data. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. /// - port: The port number on which the DB cluster accepts connections. Valid for Cluster Type: Aurora DB clusters only Valid Values: 1150-65535 Default: The same port as the original DB cluster. /// - preferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must be at least 30 minutes. /// - preferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide. 
Constraints: Must be in the format ddd:hh24:mi-ddd:hh24:mi. Days must be one of Mon | Tue | Wed | Thu | Fri | Sat | Sun. Must be in Universal Coordinated Time (UTC). Must be at least 30 minutes. @@ -5102,9 +5102,9 @@ public struct RDS: AWSService { /// - backupRetentionPeriod: The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon as possible. This setting doesn't apply to Amazon Aurora DB instances. The retention period for automated backups is managed by the DB cluster. For more information, see ModifyDBCluster. Default: Uses existing setting Constraints: Must be a value from 0 to 35. Can't be set to 0 if the DB instance is a source to read replicas. Can't be set to 0 for an RDS Custom for Oracle DB instance. /// - caCertificateIdentifier: The CA certificate identifier to use for the DB instance's server certificate. This setting doesn't apply to RDS Custom DB instances. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide. /// - certificateRotationRestart: Specifies whether the DB instance is restarted when you rotate your SSL/TLS certificate. By default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted. Set this parameter only if you are not using SSL/TLS to connect to the DB instance. 
If you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate: For more information about rotating your SSL/TLS certificate for RDS DB engines, see Rotating Your SSL/TLS Certificate. in the Amazon RDS User Guide. For more information about rotating your SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS Certificate in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. - /// - cloudwatchLogsExportConfiguration: The log types to be enabled for export to CloudWatch Logs for a specific DB instance. A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect. This setting doesn't apply to RDS Custom DB instances. + /// - cloudwatchLogsExportConfiguration: The log types to be enabled for export to CloudWatch Logs for a specific DB instance. A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect. This setting doesn't apply to RDS Custom DB instances. The following values are valid for each DB engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - postgresql | upgrade For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. /// - copyTagsToSnapshot: Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags aren't copied. This setting doesn't apply to Amazon Aurora DB instances. 
Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see ModifyDBCluster. - /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the instance. + /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. Currently, this value is inherited from the DB cluster and can't be changed. /// - dbInstanceClass: The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide. For RDS Custom, see DB instance class support for RDS Custom for Oracle and DB instance class support for RDS Custom for SQL Server. If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless you specify ApplyImmediately in your request. Default: Uses existing setting Constraints: If you are modifying the DB instance class and upgrading the engine version at the same time, the currently running engine version must be supported on the specified DB instance class. Otherwise, the operation returns an error. In this case, first run the operation to upgrade the engine version, and then run it again to modify the DB instance class. /// - dbInstanceIdentifier: The identifier of DB instance to modify. This value is stored as a lowercase string. Constraints: Must match the identifier of an existing DB instance. /// - dbParameterGroupName: The name of the DB parameter group to apply to the DB instance. Changing this setting doesn't result in an outage. 
The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. In this case, the DB instance isn't rebooted automatically, and the parameter changes aren't applied during the next maintenance window. However, if you modify dynamic parameters in the newly associated DB parameter group, these changes are applied immediately without a reboot. This setting doesn't apply to RDS Custom DB instances. Default: Uses existing setting Constraints: Must be in the same DB parameter group family as the DB instance. @@ -6702,7 +6702,7 @@ public struct RDS: AWSService { /// Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment. If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original DB instance with the DB instance created from the snapshot. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. 
For more information about upgrading a RDS for MySQL DB snapshot engine version, see Upgrading a MySQL DB snapshot engine version. For more information about upgrading a RDS for PostgreSQL DB snapshot engine version, Upgrading a PostgreSQL DB snapshot engine version. This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot. /// /// Parameters: - /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. /// - autoMinorVersionUpgrade: Specifies whether to automatically apply minor version upgrades to the DB instance during the maintenance window. If you restore an RDS Custom DB instance, you must disable this parameter. /// - availabilityZone: The Availability Zone (AZ) where the DB instance will be created. Default: A random, system-chosen Availability Zone. Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. Example: us-east-1a /// - backupTarget: Specifies where automated backups and manual snapshots are stored for the restored DB instance. Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services Region). The default is region. For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. 
@@ -6854,13 +6854,13 @@ public struct RDS: AWSService { /// Amazon Relational Database Service (Amazon RDS) supports importing MySQL databases by using backup files. You can create a backup of your on-premises database, store it on Amazon Simple Storage Service (Amazon S3), and then restore the backup file onto a new Amazon RDS DB instance running MySQL. For more information, see Importing Data into an Amazon RDS MySQL DB Instance in the Amazon RDS User Guide. This operation doesn't apply to RDS Custom. /// /// Parameters: - /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. /// - autoMinorVersionUpgrade: Specifies whether to automatically apply minor engine upgrades to the DB instance during the maintenance window. By default, minor engine upgrades are not applied automatically. /// - availabilityZone: The Availability Zone that the DB instance is created in. For information about Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones in the Amazon RDS User Guide. Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region. Example: us-east-1d Constraint: The AvailabilityZone parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web Services Region as the current endpoint. 
/// - backupRetentionPeriod: The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. For more information, see CreateDBInstance. /// - caCertificateIdentifier: The CA certificate identifier to use for the DB instance's server certificate. This setting doesn't apply to RDS Custom DB instances. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide. /// - copyTagsToSnapshot: Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. - /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the instance. + /// - databaseInsightsMode: Specifies the mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. Currently, this value is inherited from the DB cluster and can't be changed. /// - dbInstanceClass: The compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Importing from Amazon S3 isn't supported on the db.t2.micro DB instance class. /// - dbInstanceIdentifier: The DB instance identifier. This parameter is stored as a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: mydbinstance /// - dbName: The name of the database to create when the DB instance is created. Follow the naming rules specified in CreateDBInstance. @@ -7036,7 +7036,7 @@ public struct RDS: AWSService { /// Restores a DB instance to an arbitrary point in time. 
You can restore to any point in time before the time identified by the LatestRestorableTime property. You can restore to a point up to the number of days specified by the BackupRetentionPeriod property. The target database is created with most of the original configuration, but in a system-selected Availability Zone, with the default security group, the default subnet group, and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored deployment and not a single-AZ deployment. This operation doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterToPointInTime. /// /// Parameters: - /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. /// - autoMinorVersionUpgrade: Specifies whether minor version upgrades are applied automatically to the DB instance during the maintenance window. This setting doesn't apply to RDS Custom. /// - availabilityZone: The Availability Zone (AZ) where the DB instance will be created. Default: A random, system-chosen Availability Zone. Constraints: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. 
Example: us-east-1a /// - backupTarget: The location for storing automated backups and manual snapshots for the restored DB instance. Valid Values: outposts (Amazon Web Services Outposts) region (Amazon Web Services Region) Default: region For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. @@ -7909,9 +7909,9 @@ extension RDS { /// /// - Parameters: /// - dbClusterParameterGroupName: The name of a specific DB cluster parameter group to return parameter details for. Constraints: If supplied, must match the name of an existing DBClusterParameterGroup. - /// - filters: This parameter isn't currently supported. + /// - filters: A filter that specifies one or more DB cluster parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB cluster parameters with these names. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. - /// - source: A specific source to return parameters for. Valid Values: customer engine service + /// - source: A specific source to return parameters for. Valid Values: engine-default system user /// - logger: Logger used for logging @inlinable public func describeDBClusterParametersPaginator( @@ -8283,7 +8283,7 @@ extension RDS { /// /// - Parameters: /// - dbParameterGroupName: The name of a specific DB parameter group to return details for. Constraints: If supplied, must match the name of an existing DBParameterGroup. - /// - filters: This parameter isn't currently supported. + /// - filters: A filter that specifies one or more DB parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB parameters with these names. 
/// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. /// - source: The parameter types to return. Default: All parameter types returned Valid Values: user | system | engine-default /// - logger: Logger used for logging @@ -8725,7 +8725,7 @@ extension RDS { /// /// - Parameters: /// - dbParameterGroupFamily: The name of the DB parameter group family. Valid Values: aurora-mysql5.7 aurora-mysql8.0 aurora-postgresql10 aurora-postgresql11 aurora-postgresql12 aurora-postgresql13 aurora-postgresql14 custom-oracle-ee-19 custom-oracle-ee-cdb-19 db2-ae db2-se mariadb10.2 mariadb10.3 mariadb10.4 mariadb10.5 mariadb10.6 mysql5.7 mysql8.0 oracle-ee-19 oracle-ee-cdb-19 oracle-ee-cdb-21 oracle-se2-19 oracle-se2-cdb-19 oracle-se2-cdb-21 postgres10 postgres11 postgres12 postgres13 postgres14 sqlserver-ee-11.0 sqlserver-ee-12.0 sqlserver-ee-13.0 sqlserver-ee-14.0 sqlserver-ee-15.0 sqlserver-ex-11.0 sqlserver-ex-12.0 sqlserver-ex-13.0 sqlserver-ex-14.0 sqlserver-ex-15.0 sqlserver-se-11.0 sqlserver-se-12.0 sqlserver-se-13.0 sqlserver-se-14.0 sqlserver-se-15.0 sqlserver-web-11.0 sqlserver-web-12.0 sqlserver-web-13.0 sqlserver-web-14.0 sqlserver-web-15.0 - /// - filters: This parameter isn't currently supported. + /// - filters: A filter that specifies one or more parameters to describe. The only supported filter is parameter-name. The results list only includes information about the parameters with these names. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. 
/// - logger: Logger used for logging @inlinable diff --git a/Sources/Soto/Services/RDS/RDS_shapes.swift b/Sources/Soto/Services/RDS/RDS_shapes.swift index 64b8daa7fa..0a2de55574 100644 --- a/Sources/Soto/Services/RDS/RDS_shapes.swift +++ b/Sources/Soto/Services/RDS/RDS_shapes.swift @@ -722,10 +722,10 @@ extension RDS { } public struct CloudwatchLogsExportConfiguration: AWSEncodableShape { - /// The list of log types to disable. + /// The list of log types to disable. The following values are valid for each DB engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - postgresql | upgrade @OptionalCustomCoding> public var disableLogTypes: [String]? - /// The list of log types to enable. + /// The list of log types to enable. The following values are valid for each DB engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - postgresql | upgrade @OptionalCustomCoding> public var enableLogTypes: [String]? @@ -1333,7 +1333,7 @@ extension RDS { /// The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters only This setting is required to create a Multi-AZ DB cluster. public let allocatedStorage: Int? - /// Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Multi-AZ DB clusters only + /// Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let autoMinorVersionUpgrade: Bool?
/// A list of Availability Zones (AZs) where you specifically want to create DB instances in the DB cluster. For information on AZs, see Availability Zones in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only Constraints: Can't specify more than three AZs. @OptionalCustomCoding> @@ -1350,7 +1350,7 @@ extension RDS { public let clusterScalabilityType: ClusterScalabilityType? /// Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let copyTagsToSnapshot: Bool? - /// Specifies the mode of Database Insights to enable for the cluster. + /// The mode of Database Insights to enable for the DB cluster. If you set this value to advanced, you must also set the PerformanceInsightsEnabled parameter to true and the PerformanceInsightsRetentionPeriod parameter to 465. Valid for Cluster Type: Aurora DB clusters only public let databaseInsightsMode: DatabaseInsightsMode? /// The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional database with this name is created. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let databaseName: String? @@ -1383,7 +1383,7 @@ extension RDS { public let enableLimitlessDatabase: Bool? /// Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances. Valid for: Aurora DB clusters only public let enableLocalWriteForwarding: Bool? - /// Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters only + /// Specifies whether to turn on Performance Insights for the DB cluster. 
For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let enablePerformanceInsights: Bool? /// The database engine to use for this DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: aurora-mysql aurora-postgresql mysql postgres neptune - For information about using Amazon Neptune, see the Amazon Neptune User Guide . public let engine: String? @@ -1407,17 +1407,17 @@ extension RDS { public let masterUserPassword: String? /// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let masterUserSecretKmsKeyId: String? - /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. 
Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0 + /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0 public let monitoringInterval: Int? - /// The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid for Cluster Type: Multi-AZ DB clusters only + /// The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let monitoringRoleArn: String? /// The network type of the DB cluster. The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only Valid Values: IPV4 | DUAL public let networkType: String? /// The option group to associate the DB cluster with. 
DB clusters are associated with a default option group that can't be modified. public let optionGroupName: String? - /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only + /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let performanceInsightsKMSKeyId: String? - /// The number of days to retain Performance Insights data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. + /// The number of days to retain Performance Insights data. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. 
public let performanceInsightsRetentionPeriod: Int? /// The port number on which the instances in the DB cluster accept connections. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 1150-65535 Default: RDS for MySQL and Aurora MySQL - 3306 RDS for PostgreSQL and Aurora PostgreSQL - 5432 public let port: Int? @@ -1684,7 +1684,7 @@ extension RDS { public let copyTagsToSnapshot: Bool? /// The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. This setting is required for RDS Custom. Constraints: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. public let customIamInstanceProfile: String? - /// Specifies the mode of Database Insights to enable for the instance. + /// The mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. Currently, this value is inherited from the DB cluster and can't be changed. public let databaseInsightsMode: DatabaseInsightsMode? /// The identifier of the DB cluster that this DB instance will belong to. This setting doesn't apply to RDS Custom DB instances. public let dbClusterIdentifier: String? @@ -1942,7 +1942,7 @@ extension RDS { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } public struct _VpcSecurityGroupIdsEncoding: ArrayCoderProperties { public static let member = "VpcSecurityGroupId" } - /// The amount of storage (in gibibytes) to allocate initially for the read replica. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your read replica so that the create operation can succeed. 
You can also allocate additional storage for future growth. + /// The amount of storage (in gibibytes) to allocate initially for the read replica. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your read replica so that the create operation can succeed. You can also allocate additional storage for future growth. public let allocatedStorage: Int? /// Specifies whether to automatically apply minor engine upgrades to the read replica during the maintenance window. This setting doesn't apply to RDS Custom DB instances. Default: Inherits the value from the source DB instance. public let autoMinorVersionUpgrade: Bool? @@ -1954,7 +1954,7 @@ extension RDS { public let copyTagsToSnapshot: Bool? /// The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. This setting is required for RDS Custom DB instances. public let customIamInstanceProfile: String? - /// Specifies the mode of Database Insights. + /// The mode of Database Insights to enable for the read replica. Currently, this setting is not supported. public let databaseInsightsMode: DatabaseInsightsMode? /// The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Default: Inherits the value from the source DB instance. 
public let dbInstanceClass: String? @@ -2820,7 +2820,7 @@ extension RDS { public var associatedRoles: [DBClusterRole]? /// The time when a stopped DB cluster is restarted automatically. public let automaticRestartTime: Date? - /// Indicates whether minor version patches are applied automatically. This setting is only for non-Aurora Multi-AZ DB clusters. + /// Indicates whether minor version patches are applied automatically. This setting is for Aurora DB clusters and Multi-AZ DB clusters. public let autoMinorVersionUpgrade: Bool? /// The list of Availability Zones (AZs) where instances in the DB cluster can be created. @OptionalCustomCoding> @@ -2851,7 +2851,7 @@ extension RDS { /// The custom endpoints associated with the DB cluster. @OptionalCustomCoding> public var customEndpoints: [String]? - /// The mode of Database Insights that is enabled for the cluster. + /// The mode of Database Insights that is enabled for the DB cluster. public let databaseInsightsMode: DatabaseInsightsMode? /// The name of the initial database that was specified for the DB cluster when it was created, if one was provided. This same name is returned for the life of the DB cluster. public let databaseName: String? @@ -2923,9 +2923,9 @@ extension RDS { public let masterUsername: String? /// The secret managed by RDS in Amazon Web Services Secrets Manager for the master user password. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide. public let masterUserSecret: MasterUserSecret? - /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. This setting is only for non-Aurora Multi-AZ DB clusters. + /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. 
This setting is only for Aurora DB clusters and Multi-AZ DB clusters. public let monitoringInterval: Int? - /// The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. This setting is only for non-Aurora Multi-AZ DB clusters. + /// The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. This setting is only for Aurora DB clusters and Multi-AZ DB clusters. public let monitoringRoleArn: String? /// Indicates whether the DB cluster has instances in multiple Availability Zones. public let multiAZ: Bool? @@ -2935,11 +2935,11 @@ extension RDS { public let pendingModifiedValues: ClusterPendingModifiedValues? /// The progress of the operation as a percentage. public let percentProgress: String? - /// Indicates whether Performance Insights is enabled for the DB cluster. This setting is only for non-Aurora Multi-AZ DB clusters. + /// Indicates whether Performance Insights is enabled for the DB cluster. This setting is only for Aurora DB clusters and Multi-AZ DB clusters. public let performanceInsightsEnabled: Bool? - /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. This setting is only for non-Aurora Multi-AZ DB clusters. + /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. This setting is only for Aurora DB clusters and Multi-AZ DB clusters. public let performanceInsightsKMSKeyId: String? - /// The number of days to retain Performance Insights data. This setting is only for non-Aurora Multi-AZ DB clusters. Valid Values: 7 month * 31, where month is a number of months from 1-23.
Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days + /// The number of days to retain Performance Insights data. This setting is only for Aurora DB clusters and Multi-AZ DB clusters. Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days public let performanceInsightsRetentionPeriod: Int? /// The port that the database engine is listening on. public let port: Int? @@ -6356,14 +6356,14 @@ extension RDS { /// The name of a specific DB cluster parameter group to return parameter details for. Constraints: If supplied, must match the name of an existing DBClusterParameterGroup. public let dbClusterParameterGroupName: String? - /// This parameter isn't currently supported. + /// A filter that specifies one or more DB cluster parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB cluster parameters with these names. @OptionalCustomCoding> public var filters: [Filter]? /// An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. public let marker: String? /// The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. public let maxRecords: Int? - /// A specific source to return parameters for. Valid Values: customer engine service + /// A specific source to return parameters for. Valid Values: engine-default system user public let source: String? @inlinable @@ -6730,7 +6730,7 @@ extension RDS { /// The name of a specific DB parameter group to return details for. 
Constraints: If supplied, must match the name of an existing DBParameterGroup. public let dbParameterGroupName: String? - /// This parameter isn't currently supported. + /// A filter that specifies one or more DB parameters to describe. The only supported filter is parameter-name. The results list only includes information about the DB parameters with these names. @OptionalCustomCoding> public var filters: [Filter]? /// An optional pagination token provided by a previous DescribeDBParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -7305,7 +7305,7 @@ extension RDS { /// The name of the DB parameter group family. Valid Values: aurora-mysql5.7 aurora-mysql8.0 aurora-postgresql10 aurora-postgresql11 aurora-postgresql12 aurora-postgresql13 aurora-postgresql14 custom-oracle-ee-19 custom-oracle-ee-cdb-19 db2-ae db2-se mariadb10.2 mariadb10.3 mariadb10.4 mariadb10.5 mariadb10.6 mysql5.7 mysql8.0 oracle-ee-19 oracle-ee-cdb-19 oracle-ee-cdb-21 oracle-se2-19 oracle-se2-cdb-19 oracle-se2-cdb-21 postgres10 postgres11 postgres12 postgres13 postgres14 sqlserver-ee-11.0 sqlserver-ee-12.0 sqlserver-ee-13.0 sqlserver-ee-14.0 sqlserver-ee-15.0 sqlserver-ex-11.0 sqlserver-ex-12.0 sqlserver-ex-13.0 sqlserver-ex-14.0 sqlserver-ex-15.0 sqlserver-se-11.0 sqlserver-se-12.0 sqlserver-se-13.0 sqlserver-se-14.0 sqlserver-se-15.0 sqlserver-web-11.0 sqlserver-web-12.0 sqlserver-web-13.0 sqlserver-web-14.0 sqlserver-web-15.0 public let dbParameterGroupFamily: String? - /// This parameter isn't currently supported. + /// A filter that specifies one or more parameters to describe. The only supported filter is parameter-name. The results list only includes information about the parameters with these names. @OptionalCustomCoding> public var filters: [Filter]? /// An optional pagination token provided by a previous DescribeEngineDefaultParameters request. 
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -9128,7 +9128,7 @@ extension RDS { public let allowMajorVersionUpgrade: Bool? /// Specifies whether the modifications in this request are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is disabled, changes to the DB cluster are applied during the next maintenance window. Most modifications can be applied immediately or during the next scheduled maintenance window. Some modifications, such as turning on deletion protection and changing the master password, are applied immediately—regardless of when you choose to apply them. By default, this parameter is disabled. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let applyImmediately: Bool? - /// Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Multi-AZ DB clusters only + /// Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let autoMinorVersionUpgrade: Bool? /// The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup. public let awsBackupRecoveryPointArn: String? @@ -9142,7 +9142,7 @@ extension RDS { public let cloudwatchLogsExportConfiguration: CloudwatchLogsExportConfiguration? /// Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let copyTagsToSnapshot: Bool? - /// Specifies the mode of Database Insights to enable for the cluster. 
+ /// Specifies the mode of Database Insights to enable for the DB cluster. If you change the value from standard to advanced, you must set the PerformanceInsightsEnabled parameter to true and the PerformanceInsightsRetentionPeriod parameter to 465. If you change the value from advanced to standard, you must set the PerformanceInsightsEnabled parameter to false. Valid for Cluster Type: Aurora DB clusters only public let databaseInsightsMode: DatabaseInsightsMode? /// The DB cluster identifier for the cluster being modified. This parameter isn't case-sensitive. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must match the identifier of an existing DB cluster. public let dbClusterIdentifier: String? @@ -9192,9 +9192,9 @@ extension RDS { public let newDBClusterIdentifier: String? /// The option group to associate the DB cluster with. DB clusters are associated with a default option group that can't be modified. public let optionGroupName: String? - /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only + /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. 
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let performanceInsightsKMSKeyId: String? - /// The number of days to retain Performance Insights data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. + /// The number of days to retain Performance Insights data. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error. public let performanceInsightsRetentionPeriod: Int? /// The port number on which the DB cluster accepts connections. Valid for Cluster Type: Aurora DB clusters only Valid Values: 1150-65535 Default: The same port as the original DB cluster. public let port: Int? @@ -9420,11 +9420,11 @@ extension RDS { public let caCertificateIdentifier: String? /// Specifies whether the DB instance is restarted when you rotate your SSL/TLS certificate. By default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted. Set this parameter only if you are not using SSL/TLS to connect to the DB instance. If you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate: For more information about rotating your SSL/TLS certificate for RDS DB engines, see Rotating Your SSL/TLS Certificate. in the Amazon RDS User Guide. For more information about rotating your SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS Certificate in the Amazon Aurora User Guide. 
This setting doesn't apply to RDS Custom DB instances. public let certificateRotationRestart: Bool? - /// The log types to be enabled for export to CloudWatch Logs for a specific DB instance. A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect. This setting doesn't apply to RDS Custom DB instances. + /// The log types to be enabled for export to CloudWatch Logs for a specific DB instance. A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect. This setting doesn't apply to RDS Custom DB instances. The following values are valid for each DB engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - postgresql | upgrade For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. public let cloudwatchLogsExportConfiguration: CloudwatchLogsExportConfiguration? /// Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags aren't copied. This setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. For more information, see ModifyDBCluster. public let copyTagsToSnapshot: Bool? - /// Specifies the mode of Database Insights to enable for the instance. + /// Specifies the mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. 
Currently, this value is inherited from the DB cluster and can't be changed. public let databaseInsightsMode: DatabaseInsightsMode? /// The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide. For RDS Custom, see DB instance class support for RDS Custom for Oracle and DB instance class support for RDS Custom for SQL Server. If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless you specify ApplyImmediately in your request. Default: Uses existing setting Constraints: If you are modifying the DB instance class and upgrading the engine version at the same time, the currently running engine version must be supported on the specified DB instance class. Otherwise, the operation returns an error. In this case, first run the operation to upgrade the engine version, and then run it again to modify the DB instance class. public let dbInstanceClass: String? @@ -12421,7 +12421,7 @@ extension RDS { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } public struct _VpcSecurityGroupIdsEncoding: ArrayCoderProperties { public static let member = "VpcSecurityGroupId" } - /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. 
This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. public let allocatedStorage: Int? /// Specifies whether to automatically apply minor version upgrades to the DB instance during the maintenance window. If you restore an RDS Custom DB instance, you must disable this parameter. public let autoMinorVersionUpgrade: Bool? @@ -12621,7 +12621,7 @@ extension RDS { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } public struct _VpcSecurityGroupIdsEncoding: ArrayCoderProperties { public static let member = "VpcSecurityGroupId" } - /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. public let allocatedStorage: Int? /// Specifies whether to automatically apply minor engine upgrades to the DB instance during the maintenance window. By default, minor engine upgrades are not applied automatically. public let autoMinorVersionUpgrade: Bool? @@ -12633,7 +12633,7 @@ extension RDS { public let caCertificateIdentifier: String? /// Specifies whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. public let copyTagsToSnapshot: Bool? - /// Specifies the mode of Database Insights to enable for the instance. 
+ /// Specifies the mode of Database Insights to enable for the DB instance. This setting only applies to Amazon Aurora DB instances. Currently, this value is inherited from the DB cluster and can't be changed. public let databaseInsightsMode: DatabaseInsightsMode? /// The compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Importing from Amazon S3 isn't supported on the db.t2.micro DB instance class. public let dbInstanceClass: String? @@ -12861,7 +12861,7 @@ extension RDS { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } public struct _VpcSecurityGroupIdsEncoding: ArrayCoderProperties { public static let member = "VpcSecurityGroupId" } - /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. + /// The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. This setting isn't valid for RDS for SQL Server. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. public let allocatedStorage: Int? /// Specifies whether minor version upgrades are applied automatically to the DB instance during the maintenance window. This setting doesn't apply to RDS Custom. public let autoMinorVersionUpgrade: Bool? 
diff --git a/Sources/Soto/Services/Rbin/Rbin_api.swift b/Sources/Soto/Services/Rbin/Rbin_api.swift index 88be5e729e..67847cb181 100644 --- a/Sources/Soto/Services/Rbin/Rbin_api.swift +++ b/Sources/Soto/Services/Rbin/Rbin_api.swift @@ -79,6 +79,52 @@ public struct Rbin: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "rbin.af-south-1.api.aws", + "ap-east-1": "rbin.ap-east-1.api.aws", + "ap-northeast-1": "rbin.ap-northeast-1.api.aws", + "ap-northeast-2": "rbin.ap-northeast-2.api.aws", + "ap-northeast-3": "rbin.ap-northeast-3.api.aws", + "ap-south-1": "rbin.ap-south-1.api.aws", + "ap-south-2": "rbin.ap-south-2.api.aws", + "ap-southeast-1": "rbin.ap-southeast-1.api.aws", + "ap-southeast-2": "rbin.ap-southeast-2.api.aws", + "ap-southeast-3": "rbin.ap-southeast-3.api.aws", + "ap-southeast-4": "rbin.ap-southeast-4.api.aws", + "ap-southeast-5": "rbin.ap-southeast-5.api.aws", + "ca-central-1": "rbin.ca-central-1.api.aws", + "ca-west-1": "rbin.ca-west-1.api.aws", + "cn-north-1": "rbin.cn-north-1.api.amazonwebservices.com.cn", + "cn-northwest-1": "rbin.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "rbin.eu-central-1.api.aws", + "eu-central-2": "rbin.eu-central-2.api.aws", + "eu-north-1": "rbin.eu-north-1.api.aws", + "eu-south-1": "rbin.eu-south-1.api.aws", + "eu-south-2": "rbin.eu-south-2.api.aws", + "eu-west-1": "rbin.eu-west-1.api.aws", + "eu-west-2": "rbin.eu-west-2.api.aws", + "eu-west-3": "rbin.eu-west-3.api.aws", + "il-central-1": "rbin.il-central-1.api.aws", + "me-central-1": "rbin.me-central-1.api.aws", + "me-south-1": "rbin.me-south-1.api.aws", + "sa-east-1": "rbin.sa-east-1.api.aws", + "us-east-1": "rbin.us-east-1.api.aws", + "us-east-2": "rbin.us-east-2.api.aws", + "us-gov-east-1": "rbin.us-gov-east-1.api.aws", + "us-gov-west-1": "rbin.us-gov-west-1.api.aws", + "us-west-1": "rbin.us-west-1.api.aws", + 
"us-west-2": "rbin.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "rbin-fips.ca-central-1.api.aws", + "ca-west-1": "rbin-fips.ca-west-1.api.aws", + "us-east-1": "rbin-fips.us-east-1.api.aws", + "us-east-2": "rbin-fips.us-east-2.api.aws", + "us-gov-east-1": "rbin-fips.us-gov-east-1.api.aws", + "us-gov-west-1": "rbin-fips.us-gov-west-1.api.aws", + "us-west-1": "rbin-fips.us-west-1.api.aws", + "us-west-2": "rbin-fips.us-west-2.api.aws" + ]), [.fips]: .init(endpoints: [ "ca-central-1": "rbin-fips.ca-central-1.amazonaws.com", "ca-west-1": "rbin-fips.ca-west-1.amazonaws.com", diff --git a/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift b/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift index 050d686ce2..39679ae19a 100644 --- a/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift +++ b/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift @@ -471,6 +471,24 @@ extension Resiliencehub { } } + public struct Alarm: AWSDecodableShape { + /// Amazon Resource Name (ARN) of the Amazon CloudWatch alarm. + public let alarmArn: String? + /// Indicates the source of the Amazon CloudWatch alarm. That is, it indicates if the alarm was created using Resilience Hub recommendation (AwsResilienceHub), or if you had created the alarm in Amazon CloudWatch (Customer). + public let source: String? + + @inlinable + public init(alarmArn: String? = nil, source: String? = nil) { + self.alarmArn = alarmArn + self.source = source + } + + private enum CodingKeys: String, CodingKey { + case alarmArn = "alarmArn" + case source = "source" + } + } + public struct AlarmRecommendation: AWSDecodableShape { /// Application Component name for the CloudWatch alarm recommendation. This name is saved as the first item in the appComponentNames list. public let appComponentName: String? 
@@ -976,7 +994,7 @@ extension Resiliencehub { } public struct AssessmentRiskRecommendation: AWSDecodableShape { - /// Indicates the Application Components (AppComponents) that were assessed as part of the assessnent and are associated with the identified risk and recommendation. This property is available only in the US East (N. Virginia) Region. + /// Indicates the Application Components (AppComponents) that were assessed as part of the assessment and are associated with the identified risk and recommendation. This property is available only in the US East (N. Virginia) Region. public let appComponents: [String]? /// Indicates the recommendation provided by the Resilience Hub to address the identified risks in the application. This property is available only in the US East (N. Virginia) Region. public let recommendation: String? @@ -1087,6 +1105,8 @@ extension Resiliencehub { } public struct BatchUpdateRecommendationStatusSuccessfulEntry: AWSDecodableShape { + /// Indicates the identifier of an AppComponent. + public let appComponentId: String? /// An identifier for an entry in this batch that is used to communicate the result. The entryIds of a batch request need to be unique within a request. public let entryId: String /// Indicates if the operational recommendation was successfully excluded. @@ -1099,7 +1119,8 @@ extension Resiliencehub { public let referenceId: String @inlinable - public init(entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? = nil, item: UpdateRecommendationStatusItem? = nil, referenceId: String) { + public init(appComponentId: String? = nil, entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? = nil, item: UpdateRecommendationStatusItem? 
= nil, referenceId: String) { + self.appComponentId = appComponentId self.entryId = entryId self.excluded = excluded self.excludeReason = excludeReason @@ -1108,6 +1129,7 @@ extension Resiliencehub { } private enum CodingKeys: String, CodingKey { + case appComponentId = "appComponentId" case entryId = "entryId" case excluded = "excluded" case excludeReason = "excludeReason" @@ -2792,6 +2814,24 @@ extension Resiliencehub { } } + public struct Experiment: AWSDecodableShape { + /// Amazon Resource Name (ARN) of the FIS experiment. + public let experimentArn: String? + /// Identifier of the FIS experiment template. + public let experimentTemplateId: String? + + @inlinable + public init(experimentArn: String? = nil, experimentTemplateId: String? = nil) { + self.experimentArn = experimentArn + self.experimentTemplateId = experimentTemplateId + } + + private enum CodingKeys: String, CodingKey { + case experimentArn = "experimentArn" + case experimentTemplateId = "experimentTemplateId" + } + } + public struct FailedGroupingRecommendationEntry: AWSDecodableShape { /// Indicates the error that occurred while implementing a grouping recommendation. public let errorMessage: String @@ -4281,7 +4321,7 @@ extension Resiliencehub { public struct PermissionModel: AWSEncodableShape & AWSDecodableShape { /// Defines a list of role Amazon Resource Names (ARNs) to be used in other accounts. These ARNs are used for querying purposes while importing resources and assessing your application. These ARNs are required only when your resources are in other accounts and you have different role name in these accounts. Else, the invoker role name will be used in the other accounts. These roles must have a trust policy with iam:AssumeRole permission to the invoker role in the primary account. public let crossAccountRoleArns: [String]? 
- /// Existing Amazon Web Services IAM role name in the primary Amazon Web Services account that will be assumed by Resilience Hub Service Principle to obtain a read-only access to your application resources while running an assessment. You must have iam:passRole permission for this role while creating or updating the application. Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-] characters. + /// Existing Amazon Web Services IAM role name in the primary Amazon Web Services account that will be assumed by Resilience Hub Service Principle to obtain a read-only access to your application resources while running an assessment. If your IAM role includes a path, you must include the path in the invokerRoleName parameter. For example, if your IAM role's ARN is arn:aws:iam:123456789012:role/my-path/role-name, you should pass my-path/role-name. You must have iam:passRole permission for this role while creating or updating the application. Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-] characters. public let invokerRoleName: String? /// Defines how Resilience Hub scans your resources. It can scan for the resources by using a pre-existing role in your Amazon Web Services account, or by using the credentials of the current IAM user. public let type: PermissionModelType @@ -4519,10 +4559,14 @@ extension Resiliencehub { public struct RecommendationItem: AWSDecodableShape { /// Specifies if the recommendation has already been implemented. public let alreadyImplemented: Bool? + /// Indicates the previously implemented Amazon CloudWatch alarm discovered by Resilience Hub. + public let discoveredAlarm: Alarm? /// Indicates if an operational recommendation item is excluded. public let excluded: Bool? /// Indicates the reason for excluding an operational recommendation. public let excludeReason: ExcludeRecommendationReason? + /// Indicates the experiment created in FIS that was discovered by Resilience Hub, which matches the recommendation. 
+ public let latestDiscoveredExperiment: Experiment? /// Identifier of the resource. public let resourceId: String? /// Identifier of the target account. @@ -4531,10 +4575,12 @@ extension Resiliencehub { public let targetRegion: String? @inlinable - public init(alreadyImplemented: Bool? = nil, excluded: Bool? = nil, excludeReason: ExcludeRecommendationReason? = nil, resourceId: String? = nil, targetAccountId: String? = nil, targetRegion: String? = nil) { + public init(alreadyImplemented: Bool? = nil, discoveredAlarm: Alarm? = nil, excluded: Bool? = nil, excludeReason: ExcludeRecommendationReason? = nil, latestDiscoveredExperiment: Experiment? = nil, resourceId: String? = nil, targetAccountId: String? = nil, targetRegion: String? = nil) { self.alreadyImplemented = alreadyImplemented + self.discoveredAlarm = discoveredAlarm self.excluded = excluded self.excludeReason = excludeReason + self.latestDiscoveredExperiment = latestDiscoveredExperiment self.resourceId = resourceId self.targetAccountId = targetAccountId self.targetRegion = targetRegion @@ -4542,8 +4588,10 @@ extension Resiliencehub { private enum CodingKeys: String, CodingKey { case alreadyImplemented = "alreadyImplemented" + case discoveredAlarm = "discoveredAlarm" case excluded = "excluded" case excludeReason = "excludeReason" + case latestDiscoveredExperiment = "latestDiscoveredExperiment" case resourceId = "resourceId" case targetAccountId = "targetAccountId" case targetRegion = "targetRegion" @@ -5379,6 +5427,8 @@ extension Resiliencehub { } public struct TestRecommendation: AWSDecodableShape { + /// Indicates the identifier of the AppComponent. + public let appComponentId: String? /// Name of the Application Component. public let appComponentName: String? /// A list of recommended alarms that are used in the test and must be exported before or with the test. @@ -5405,7 +5455,8 @@ extension Resiliencehub { public let type: TestType? @inlinable - public init(appComponentName: String? 
= nil, dependsOnAlarms: [String]? = nil, description: String? = nil, intent: String? = nil, items: [RecommendationItem]? = nil, name: String? = nil, prerequisite: String? = nil, recommendationId: String? = nil, recommendationStatus: RecommendationStatus? = nil, referenceId: String, risk: TestRisk? = nil, type: TestType? = nil) { + public init(appComponentId: String? = nil, appComponentName: String? = nil, dependsOnAlarms: [String]? = nil, description: String? = nil, intent: String? = nil, items: [RecommendationItem]? = nil, name: String? = nil, prerequisite: String? = nil, recommendationId: String? = nil, recommendationStatus: RecommendationStatus? = nil, referenceId: String, risk: TestRisk? = nil, type: TestType? = nil) { + self.appComponentId = appComponentId self.appComponentName = appComponentName self.dependsOnAlarms = dependsOnAlarms self.description = description @@ -5421,6 +5472,7 @@ extension Resiliencehub { } private enum CodingKeys: String, CodingKey { + case appComponentId = "appComponentId" case appComponentName = "appComponentName" case dependsOnAlarms = "dependsOnAlarms" case description = "description" @@ -5817,6 +5869,8 @@ extension Resiliencehub { } public struct UpdateRecommendationStatusRequestEntry: AWSEncodableShape { + /// Indicates the identifier of the AppComponent. + public let appComponentId: String? /// An identifier for an entry in this batch that is used to communicate the result. The entryIds of a batch request need to be unique within a request. public let entryId: String /// Indicates if the operational recommendation needs to be excluded. If set to True, the operational recommendation will be excluded. @@ -5829,7 +5883,8 @@ extension Resiliencehub { public let referenceId: String @inlinable - public init(entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? = nil, item: UpdateRecommendationStatusItem? = nil, referenceId: String) { + public init(appComponentId: String? 
= nil, entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? = nil, item: UpdateRecommendationStatusItem? = nil, referenceId: String) { + self.appComponentId = appComponentId self.entryId = entryId self.excluded = excluded self.excludeReason = excludeReason @@ -5838,6 +5893,7 @@ extension Resiliencehub { } public func validate(name: String) throws { + try self.validate(self.appComponentId, name: "appComponentId", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9_\\-]{0,254}$") try self.validate(self.entryId, name: "entryId", parent: name, max: 255) try self.validate(self.entryId, name: "entryId", parent: name, min: 1) try self.item?.validate(name: "\(name).item") @@ -5846,6 +5902,7 @@ extension Resiliencehub { } private enum CodingKeys: String, CodingKey { + case appComponentId = "appComponentId" case entryId = "entryId" case excluded = "excluded" case excludeReason = "excludeReason" diff --git a/Sources/Soto/Services/SageMaker/SageMaker_api.swift b/Sources/Soto/Services/SageMaker/SageMaker_api.swift index 6c582b114e..61805acd0a 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_api.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_api.swift @@ -376,7 +376,7 @@ public struct SageMaker: AWSService { return try await self.createAlgorithm(input, logger: logger) } - /// Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously. + /// Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker AI upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously. 
@Sendable @inlinable public func createApp(_ input: CreateAppRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAppResponse { @@ -389,13 +389,13 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously. + /// Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker AI upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously. /// /// Parameters: /// - appName: The name of the app. /// - appType: The type of app. /// - domainId: The domain ID. - /// - resourceSpec: The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error. + /// - resourceSpec: The instance type and the Amazon Resource Name (ARN) of the SageMaker AI image created on the instance. The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error. /// - spaceName: The name of the space. If this value is not set, then UserProfileName must be set. /// - tags: Each tag consists of a key and an optional value. 
Tag keys must be unique per resource. /// - userProfileName: The user profile name. If this value is not set, then SpaceName must be set. @@ -423,7 +423,7 @@ public struct SageMaker: AWSService { return try await self.createApp(input, logger: logger) } - /// Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the kernels in the image. + /// Creates a configuration for running a SageMaker AI image as a KernelGateway app. The configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the kernels in the image. @Sendable @inlinable public func createAppImageConfig(_ input: CreateAppImageConfigRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAppImageConfigResponse { @@ -436,7 +436,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the kernels in the image. + /// Creates a configuration for running a SageMaker AI image as a KernelGateway app. The configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the kernels in the image. /// /// Parameters: /// - appImageConfigName: The name of the AppImageConfig. Must be unique to your account. @@ -508,7 +508,7 @@ public struct SageMaker: AWSService { return try await self.createArtifact(input, logger: logger) } - /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. 
SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. + /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. An AutoML job in SageMaker AI is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker AI then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. 
AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker AI developer guide. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. @Sendable @inlinable public func createAutoMLJob(_ input: CreateAutoMLJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAutoMLJobResponse { @@ -521,7 +521,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. 
SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. + /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. An AutoML job in SageMaker AI is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker AI then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. 
AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker AI developer guide. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. /// /// Parameters: /// - autoMLJobConfig: A collection of settings used to configure an AutoML job. @@ -564,7 +564,7 @@ public struct SageMaker: AWSService { return try await self.createAutoMLJob(input, logger: logger) } - /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. 
AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide. AutoML jobs V2 support various problem types such as regression, binary, and multiclass classification with tabular data, text and image classification, time-series forecasting, and fine-tuning of large language models (LLMs) for text generation. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. + /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. An AutoML job in SageMaker AI is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. 
SageMaker AI then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker AI developer guide. AutoML jobs V2 support various problem types such as regression, binary, and multiclass classification with tabular data, text and image classification, time-series forecasting, and fine-tuning of large language models (LLMs) for text generation. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. 
@Sendable @inlinable public func createAutoMLJobV2(_ input: CreateAutoMLJobV2Request, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAutoMLJobV2Response { @@ -577,7 +577,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. An AutoML job in SageMaker is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker developer guide. AutoML jobs V2 support various problem types such as regression, binary, and multiclass classification with tabular data, text and image classification, time-series forecasting, and fine-tuning of large language models (LLMs) for text generation. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. 
CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. + /// Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. An AutoML job in SageMaker AI is a fully automated process that allows you to build machine learning models with minimal effort and machine learning expertise. When initiating an AutoML job, you provide your data and optionally specify parameters tailored to your use case. SageMaker AI then automates the entire model development lifecycle, including data preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify and accelerate the model building process by automating various tasks and exploring different combinations of machine learning algorithms, data preprocessing techniques, and hyperparameter values. The output of an AutoML job comprises one or more trained models ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate model leaderboard, allowing you to select the best-performing model for deployment. For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html in the SageMaker AI developer guide. AutoML jobs V2 support various problem types such as regression, binary, and multiclass classification with tabular data, text and image classification, time-series forecasting, and fine-tuning of large language models (LLMs) for text generation. 
CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. /// /// Parameters: /// - autoMLComputeConfig: Specifies the compute configuration for the AutoML job V2. @@ -708,7 +708,7 @@ public struct SageMaker: AWSService { return try await self.createClusterSchedulerConfig(input, logger: logger) } - /// Creates a Git repository as a resource in your SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with. The repository can be hosted either in Amazon Web Services CodeCommit or in any other Git repository. + /// Creates a Git repository as a resource in your SageMaker AI account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your SageMaker AI account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with. 
The repository can be hosted either in Amazon Web Services CodeCommit or in any other Git repository. @Sendable @inlinable public func createCodeRepository(_ input: CreateCodeRepositoryInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCodeRepositoryOutput { @@ -721,7 +721,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a Git repository as a resource in your SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with. The repository can be hosted either in Amazon Web Services CodeCommit or in any other Git repository. + /// Creates a Git repository as a resource in your SageMaker AI account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your SageMaker AI account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with. The repository can be hosted either in Amazon Web Services CodeCommit or in any other Git repository. /// /// Parameters: /// - codeRepositoryName: The name of the Git repository. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen). @@ -743,7 +743,7 @@ public struct SageMaker: AWSService { return try await self.createCodeRepository(input, logger: logger) } - /// Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify. 
If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource. In the request body, you provide the following: A name for the compilation job Information about the input model artifacts The output location for the compiled model and the device (target) that the model runs on The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation job. You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job. To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs. + /// Starts a model compilation job. After the model has been compiled, Amazon SageMaker AI saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify. If you choose to host your model using Amazon SageMaker AI hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource. In the request body, you provide the following: A name for the compilation job Information about the input model artifacts The output location for the compiled model and the device (target) that the model runs on The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker AI assumes to perform the model compilation job. You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job. To stop a model compilation job, use StopCompilationJob. 
To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs. @Sendable @inlinable public func createCompilationJob(_ input: CreateCompilationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCompilationJobResponse { @@ -756,15 +756,15 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify. If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource. In the request body, you provide the following: A name for the compilation job Information about the input model artifacts The output location for the compiled model and the device (target) that the model runs on The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation job. You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job. To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs. + /// Starts a model compilation job. After the model has been compiled, Amazon SageMaker AI saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify. If you choose to host your model using Amazon SageMaker AI hosting services, you can use the resulting model artifacts as part of the model. 
You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource. In the request body, you provide the following: A name for the compilation job Information about the input model artifacts The output location for the compiled model and the device (target) that the model runs on The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker AI assumes to perform the model compilation job. You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job. To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs. /// /// Parameters: /// - compilationJobName: A name for the model compilation job. The name must be unique within the Amazon Web Services Region and within your Amazon Web Services account. /// - inputConfig: Provides information about the location of input model artifacts, the name and shape of the expected data inputs, and the framework in which the model was trained. /// - modelPackageVersionArn: The Amazon Resource Name (ARN) of a versioned model package. Provide either a ModelPackageVersionArn or an InputConfig object in the request syntax. The presence of both objects in the CreateCompilationJob request will return an exception. /// - outputConfig: Provides information about the output location for the compiled model and the target device the model runs on. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. 
During model compilation, Amazon SageMaker needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles. - /// - stoppingCondition: Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs. + /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. During model compilation, Amazon SageMaker AI needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker AI Roles. + /// - stoppingCondition: Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker AI ends the compilation job. Use this API to cap model training costs. /// - tags: An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources. /// - vpcConfig: A VpcConfig object that specifies the VPC that you want your compilation job to connect to. Control access to your models by configuring the VPC. For more information, see Protect Compilation Jobs by Using an Amazon Virtual Private Cloud. 
/// - logger: Logger use during operation @@ -884,7 +884,7 @@ public struct SageMaker: AWSService { return try await self.createContext(input, logger: logger) } - /// Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor. + /// Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker AI Model Monitor. @Sendable @inlinable public func createDataQualityJobDefinition(_ input: CreateDataQualityJobDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataQualityJobDefinitionResponse { @@ -897,7 +897,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor. + /// Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker AI Model Monitor. /// /// Parameters: /// - dataQualityAppSpecification: Specifies the container that runs the monitoring job. @@ -907,7 +907,7 @@ public struct SageMaker: AWSService { /// - jobDefinitionName: The name for the monitoring job definition. /// - jobResources: /// - networkConfig: Specifies networking configuration for the monitoring job. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. /// - stoppingCondition: /// - tags: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. 
/// - logger: Logger use during operation @@ -984,7 +984,7 @@ public struct SageMaker: AWSService { return try await self.createDeviceFleet(input, logger: logger) } - /// Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other. EFS storage When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files. SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption. VPC configuration All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available: PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This is the default value. VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway. When internet access is disabled, you won't be able to run a Amazon SageMaker Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your security groups allow outbound connections. 
NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker Studio app successfully. For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC. + /// Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other. EFS storage When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files. SageMaker AI uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption. VPC configuration All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available: PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker AI, which allows internet access. This is the default value. VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway. 
When internet access is disabled, you won't be able to run a Amazon SageMaker AI Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker AI API and runtime or a NAT gateway and your security groups allow outbound connections. NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker AI Studio app successfully. For more information, see Connect Amazon SageMaker AI Studio Notebooks to Resources in a VPC. @Sendable @inlinable public func createDomain(_ input: CreateDomainRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDomainResponse { @@ -997,17 +997,17 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other. EFS storage When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files. SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption. VPC configuration All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. 
The following options are available: PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This is the default value. VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway. When internet access is disabled, you won't be able to run a Amazon SageMaker Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your security groups allow outbound connections. NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker Studio app successfully. For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC. + /// Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other. EFS storage When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files. SageMaker AI uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption. VPC configuration All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. 
The following options are available: PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker AI, which allows internet access. This is the default value. VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway. When internet access is disabled, you won't be able to run a Amazon SageMaker AI Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker AI API and runtime or a NAT gateway and your security groups allow outbound connections. NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker AI Studio app successfully. For more information, see Connect Amazon SageMaker AI Studio Notebooks to Resources in a VPC. /// /// Parameters: - /// - appNetworkAccessType: Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets + /// - appNetworkAccessType: Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets /// - appSecurityGroupManagement: The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. If setting up the domain for use with RStudio, this value must be set to Service. /// - authMode: The mode of authentication that members use to access the domain. 
/// - defaultSpaceSettings: The default settings for shared spaces that users create in the domain. /// - defaultUserSettings: The default settings to use to create a user profile when UserSettings isn't specified in the call to the CreateUserProfile API. SecurityGroups is aggregated when specified in both calls. For all other settings in UserSettings, the values specified in CreateUserProfile take precedence over those specified in CreateDomain. /// - domainName: A name for the domain. /// - domainSettings: A collection of Domain settings. - /// - kmsKeyId: SageMaker uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to the domain with an Amazon Web Services managed key by default. For more control, specify a customer managed key. + /// - kmsKeyId: SageMaker AI uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to the domain with an Amazon Web Services managed key by default. For more control, specify a customer managed key. /// - subnetIds: The VPC subnets that the domain uses for communication. /// - tagPropagation: Indicates whether custom tag propagation is supported for the domain. Defaults to DISABLED. /// - tags: Tags to associated with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API. Tags that you specify for the Domain are also added to all Apps that the Domain launches. @@ -1227,7 +1227,7 @@ public struct SageMaker: AWSService { /// - dataCaptureConfig: /// - enableNetworkIsolation: Sets whether all model containers deployed to the endpoint are isolated. If they are, no inbound or outbound network calls can be made to or from the model containers. /// - endpointConfigName: The name of the endpoint configuration. You specify this name in a CreateEndpoint request. - /// - executionRoleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform actions on your behalf. 
For more information, see SageMaker Roles. To be able to pass this role to Amazon SageMaker, the caller of this action must have the iam:PassRole permission. + /// - executionRoleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform actions on your behalf. For more information, see SageMaker AI Roles. To be able to pass this role to Amazon SageMaker AI, the caller of this action must have the iam:PassRole permission. /// - explainerConfig: A member of CreateEndpointConfig that enables explainers. /// - kmsKeyId: The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: alias/ExampleAlias Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint, UpdateEndpoint requests. For more information, refer to the Amazon Web Services Key Management Service section Using Key Policies in Amazon Web Services KMS Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any nitro-based instances with local storage, the call to CreateEndpointConfig fails. For a list of instance types that support local instance storage, see Instance Store Volumes. 
For more information about local instance storage encryption, see SSD Instance Store Volumes. /// - productionVariants: An array of ProductionVariant objects, one for each model that you want to host at this endpoint. @@ -1574,7 +1574,7 @@ public struct SageMaker: AWSService { return try await self.createHyperParameterTuningJob(input, logger: logger) } - /// Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker image. + /// Creates a custom SageMaker AI image. A SageMaker AI image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker AI image. @Sendable @inlinable public func createImage(_ input: CreateImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateImageResponse { @@ -1587,13 +1587,13 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker image. + /// Creates a custom SageMaker AI image. A SageMaker AI image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker AI image. /// /// Parameters: /// - description: The description of the image. /// - displayName: The display name of the image. If not provided, ImageName is displayed. /// - imageName: The name of the image. Must be unique to your account. - /// - roleArn: The ARN of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. + /// - roleArn: The ARN of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. /// - tags: A list of tags to apply to the image. 
/// - logger: Logger use during operation @inlinable @@ -1615,7 +1615,7 @@ public struct SageMaker: AWSService { return try await self.createImage(input, logger: logger) } - /// Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage. + /// Creates a version of the SageMaker AI image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage. @Sendable @inlinable public func createImageVersion(_ input: CreateImageVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateImageVersionResponse { @@ -1628,7 +1628,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage. + /// Creates a version of the SageMaker AI image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage. /// /// Parameters: /// - aliases: A list of aliases created with the image version. @@ -1636,7 +1636,7 @@ public struct SageMaker: AWSService { /// - clientToken: A unique ID. If not specified, the Amazon Web Services CLI and Amazon Web Services SDKs, such as the SDK for Python (Boto3), add a unique value to the call. /// - horovod: Indicates Horovod compatibility. /// - imageName: The ImageName of the Image to create a version of. - /// - jobType: Indicates SageMaker job type compatibility. TRAINING: The image version is compatible with SageMaker training jobs. INFERENCE: The image version is compatible with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels. + /// - jobType: Indicates SageMaker AI job type compatibility. TRAINING: The image version is compatible with SageMaker AI training jobs. INFERENCE: The image version is compatible with SageMaker AI inference jobs. 
NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels. /// - mlFramework: The machine learning framework vended in the image version. /// - processor: Indicates CPU or GPU compatibility. CPU: The image version is compatible with CPU. GPU: The image version is compatible with GPU. /// - programmingLang: The supported programming language and its version. @@ -1674,7 +1674,7 @@ public struct SageMaker: AWSService { return try await self.createImageVersion(input, logger: logger) } - /// Creates an inference component, which is a SageMaker hosting object that you can use to deploy a model to an endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action. + /// Creates an inference component, which is a SageMaker AI hosting object that you can use to deploy a model to an endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action. 
@Sendable @inlinable public func createInferenceComponent(_ input: CreateInferenceComponentInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateInferenceComponentOutput { @@ -1687,7 +1687,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates an inference component, which is a SageMaker hosting object that you can use to deploy a model to an endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action. + /// Creates an inference component, which is a SageMaker AI hosting object that you can use to deploy a model to an endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action. /// /// Parameters: /// - endpointName: The name of an existing endpoint where you host the inference component. @@ -2006,7 +2006,7 @@ public struct SageMaker: AWSService { /// - modelBiasJobInput: Inputs for the model bias job. /// - modelBiasJobOutputConfig: /// - networkConfig: Networking options for a model bias job. 
- /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. /// - stoppingCondition: /// - tags: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. /// - logger: Logger use during operation @@ -2141,7 +2141,7 @@ public struct SageMaker: AWSService { /// - modelExplainabilityJobInput: Inputs for the model explainability job. /// - modelExplainabilityJobOutputConfig: /// - networkConfig: Networking options for a model explainability job. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. /// - stoppingCondition: /// - tags: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. /// - logger: Logger use during operation @@ -2304,7 +2304,7 @@ public struct SageMaker: AWSService { return try await self.createModelPackageGroup(input, logger: logger) } - /// Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor. + /// Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker AI Model Monitor. 
@Sendable @inlinable public func createModelQualityJobDefinition(_ input: CreateModelQualityJobDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateModelQualityJobDefinitionResponse { @@ -2317,7 +2317,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor. + /// Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker AI Model Monitor. /// /// Parameters: /// - jobDefinitionName: The name of the monitoring job definition. @@ -2327,7 +2327,7 @@ public struct SageMaker: AWSService { /// - modelQualityJobInput: A list of the inputs that are monitored. Currently endpoints are supported. /// - modelQualityJobOutputConfig: /// - networkConfig: Specifies the network configuration for the monitoring job. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. /// - stoppingCondition: /// - tags: (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. /// - logger: Logger use during operation @@ -2360,7 +2360,7 @@ public struct SageMaker: AWSService { return try await self.createModelQualityJobDefinition(input, logger: logger) } - /// Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an Amazon SageMaker Endpoint. + /// Creates a schedule that regularly starts Amazon SageMaker AI Processing Jobs to monitor the data captured for an Amazon SageMaker AI Endpoint. 
@Sendable @inlinable public func createMonitoringSchedule(_ input: CreateMonitoringScheduleRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMonitoringScheduleResponse { @@ -2373,7 +2373,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an Amazon SageMaker Endpoint. + /// Creates a schedule that regularly starts Amazon SageMaker AI Processing Jobs to monitor the data captured for an Amazon SageMaker AI Endpoint. /// /// Parameters: /// - monitoringScheduleConfig: The configuration object that specifies the monitoring schedule and defines the monitoring job. @@ -2395,7 +2395,7 @@ public struct SageMaker: AWSService { return try await self.createMonitoringSchedule(input, logger: logger) } - /// Creates an SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook. In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance. SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker with a specific algorithm or with a machine learning framework. After receiving the request, SageMaker does the following: Creates a network interface in the SageMaker VPC. (Option) If you specified SubnetId, SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC. Launches an EC2 instance of the type specified in the request in the SageMaker VPC. 
If you specified SubnetId of your VPC, SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it. After creating the notebook instance, SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it. After SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker endpoints, and validate hosted models. For more information, see How It Works. + /// Creates an SageMaker AI notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook. In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. SageMaker AI launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance. SageMaker AI also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker AI with a specific algorithm or with a machine learning framework. After receiving the request, SageMaker AI does the following: Creates a network interface in the SageMaker AI VPC. (Option) If you specified SubnetId, SageMaker AI creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker AI attaches the security group that you specified in the request to the network interface that it creates in your VPC. Launches an EC2 instance of the type specified in the request in the SageMaker AI VPC. If you specified SubnetId of your VPC, SageMaker AI specifies both network interfaces when launching this instance. 
This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it. After creating the notebook instance, SageMaker AI returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it. After SageMaker AI creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker AI endpoints, and validate hosted models. For more information, see How It Works. @Sendable @inlinable public func createNotebookInstance(_ input: CreateNotebookInstanceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateNotebookInstanceOutput { @@ -2408,20 +2408,20 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates an SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook. In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance. SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker with a specific algorithm or with a machine learning framework. After receiving the request, SageMaker does the following: Creates a network interface in the SageMaker VPC. (Option) If you specified SubnetId, SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC. Launches an EC2 instance of the type specified in the request in the SageMaker VPC. 
If you specified SubnetId of your VPC, SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it. After creating the notebook instance, SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it. After SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker endpoints, and validate hosted models. For more information, see How It Works. + /// Creates an SageMaker AI notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook. In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run. SageMaker AI launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance. SageMaker AI also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker AI with a specific algorithm or with a machine learning framework. After receiving the request, SageMaker AI does the following: Creates a network interface in the SageMaker AI VPC. (Option) If you specified SubnetId, SageMaker AI creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker AI attaches the security group that you specified in the request to the network interface that it creates in your VPC. Launches an EC2 instance of the type specified in the request in the SageMaker AI VPC. If you specified SubnetId of your VPC, SageMaker AI specifies both network interfaces when launching this instance. 
This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it. After creating the notebook instance, SageMaker AI returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it. After SageMaker AI creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker AI endpoints, and validate hosted models. For more information, see How It Works. /// /// Parameters: /// - acceleratorTypes: This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of EI instance types to associate with this notebook instance. - /// - additionalCodeRepositories: An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. - /// - defaultCodeRepository: A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances. - /// - directInternetAccess: Sets whether SageMaker provides internet access to the notebook instance. 
If you set this to Disabled this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC. For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter. + /// - additionalCodeRepositories: An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. + /// - defaultCodeRepository: A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. + /// - directInternetAccess: Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to Disabled this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker AI training and endpoint services unless you configure a NAT Gateway in your VPC. For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter. 
/// - instanceMetadataServiceConfiguration: Information on the IMDS configuration of the notebook instance /// - instanceType: The type of ML compute instance to launch for the notebook instance. - /// - kmsKeyId: The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide. + /// - kmsKeyId: The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide. /// - lifecycleConfigName: The name of a lifecycle configuration to associate with the notebook instance. For information about lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance. /// - notebookInstanceName: The name of the new notebook instance. /// - platformIdentifier: The platform identifier of the notebook instance runtime environment. - /// - roleArn: When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker Roles. To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. + /// - roleArn: When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker AI assumes this role to perform tasks on your behalf. 
You must grant this role necessary permissions so SageMaker AI can perform these tasks. The policy must allow the SageMaker AI service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker AI Roles. To be able to pass this role to SageMaker AI, the caller of this API must have the iam:PassRole permission. /// - rootAccess: Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled. Lifecycle configurations need root access to be able to set up a notebook instance. Because of this, lifecycle configurations associated with a notebook instance always run with root access even if you disable root access for users. /// - securityGroupIds: The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet. /// - subnetId: The ID of the subnet in a VPC to which you would like to have a connectivity from your ML compute instance. @@ -2526,7 +2526,7 @@ public struct SageMaker: AWSService { /// - optimizationEnvironment: The environment variables to set in the model container. /// - optimizationJobName: A custom name for the new optimization job. /// - outputConfig: Details for where to store the optimized model that you create with the optimization job. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. During model optimization, Amazon SageMaker needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles. 
+ /// - roleArn: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. During model optimization, Amazon SageMaker AI needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker AI Roles. /// - stoppingCondition: /// - tags: A list of key-value pairs associated with the optimization job. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide. /// - vpcConfig: A VPC in Amazon VPC that your optimized model has access to. @@ -2704,7 +2704,7 @@ public struct SageMaker: AWSService { return try await self.createPipeline(input, logger: logger) } - /// Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. 
You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. The JupyterLab session default expiration time is 12 hours. You can configure this value using SessionExpirationDurationInSeconds. + /// Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker AI Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. The JupyterLab session default expiration time is 12 hours. You can configure this value using SessionExpirationDurationInSeconds. @Sendable @inlinable public func createPresignedDomainUrl(_ input: CreatePresignedDomainUrlRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePresignedDomainUrlResponse { @@ -2717,7 +2717,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a URL for a specified UserProfile in a Domain. 
When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. The JupyterLab session default expiration time is 12 hours. You can configure this value using SessionExpirationDurationInSeconds. + /// Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. 
You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker AI Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. The JupyterLab session default expiration time is 12 hours. You can configure this value using SessionExpirationDurationInSeconds. /// /// Parameters: /// - domainId: The domain ID. @@ -2783,7 +2783,7 @@ public struct SageMaker: AWSService { return try await self.createPresignedMlflowTrackingServerUrl(input, logger: logger) } - /// Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker console, when you choose Open next to a notebook instance, SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page. The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address. The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. 
If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page. + /// Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker AI console, when you choose Open next to a notebook instance, SageMaker AI opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page. The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address. The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page. @Sendable @inlinable public func createPresignedNotebookInstanceUrl(_ input: CreatePresignedNotebookInstanceUrlInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePresignedNotebookInstanceUrlOutput { @@ -2796,7 +2796,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker console, when you choose Open next to a notebook instance, SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. 
The console uses this API to get the URL and show the page. The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address. The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page. + /// Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker AI console, when you choose Open next to a notebook instance, SageMaker AI opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page. The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. 
For more information, see Limit Access to a Notebook Instance by IP Address. The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page. /// /// Parameters: /// - notebookInstanceName: The name of the notebook instance. @@ -2959,7 +2959,7 @@ public struct SageMaker: AWSService { return try await self.createSpace(input, logger: logger) } - /// Creates a new Amazon SageMaker Studio Lifecycle Configuration. + /// Creates a new Amazon SageMaker AI Studio Lifecycle Configuration. @Sendable @inlinable public func createStudioLifecycleConfig(_ input: CreateStudioLifecycleConfigRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateStudioLifecycleConfigResponse { @@ -2972,12 +2972,12 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Creates a new Amazon SageMaker Studio Lifecycle Configuration. + /// Creates a new Amazon SageMaker AI Studio Lifecycle Configuration. /// /// Parameters: /// - studioLifecycleConfigAppType: The App type that the Lifecycle Configuration is attached to. - /// - studioLifecycleConfigContent: The content of your Amazon SageMaker Studio Lifecycle Configuration script. This content must be base64 encoded. - /// - studioLifecycleConfigName: The name of the Amazon SageMaker Studio Lifecycle Configuration to create. + /// - studioLifecycleConfigContent: The content of your Amazon SageMaker AI Studio Lifecycle Configuration script. This content must be base64 encoded. + /// - studioLifecycleConfigName: The name of the Amazon SageMaker AI Studio Lifecycle Configuration to create. /// - tags: Tags to be associated with the Lifecycle Configuration. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API. 
/// - logger: Logger use during operation @inlinable @@ -3714,7 +3714,7 @@ public struct SageMaker: AWSService { return try await self.deleteCodeRepository(input, logger: logger) } - /// Deletes the specified compilation job. This action deletes only the compilation job resource in Amazon SageMaker. It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, the compiled model, or the IAM role. You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes STOPPED. + /// Deletes the specified compilation job. This action deletes only the compilation job resource in Amazon SageMaker AI. It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, the compiled model, or the IAM role. You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes STOPPED. @Sendable @inlinable public func deleteCompilationJob(_ input: DeleteCompilationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -3727,7 +3727,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes the specified compilation job. This action deletes only the compilation job resource in Amazon SageMaker. It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, the compiled model, or the IAM role. You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes STOPPED. + /// Deletes the specified compilation job. 
This action deletes only the compilation job resource in Amazon SageMaker AI. It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, the compiled model, or the IAM role. You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes STOPPED. /// /// Parameters: /// - compilationJobName: The name of the compilation job to delete. @@ -4257,7 +4257,7 @@ public struct SageMaker: AWSService { return try await self.deleteHyperParameterTuningJob(input, logger: logger) } - /// Deletes a SageMaker image and all versions of the image. The container images aren't deleted. + /// Deletes a SageMaker AI image and all versions of the image. The container images aren't deleted. @Sendable @inlinable public func deleteImage(_ input: DeleteImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteImageResponse { @@ -4270,7 +4270,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes a SageMaker image and all versions of the image. The container images aren't deleted. + /// Deletes a SageMaker AI image and all versions of the image. The container images aren't deleted. /// /// Parameters: /// - imageName: The name of the image to delete. @@ -4286,7 +4286,7 @@ public struct SageMaker: AWSService { return try await self.deleteImage(input, logger: logger) } - /// Deletes a version of a SageMaker image. The container image the version represents isn't deleted. + /// Deletes a version of a SageMaker AI image. The container image the version represents isn't deleted. 
@Sendable @inlinable public func deleteImageVersion(_ input: DeleteImageVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteImageVersionResponse { @@ -4299,7 +4299,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes a version of a SageMaker image. The container image the version represents isn't deleted. + /// Deletes a version of a SageMaker AI image. The container image the version represents isn't deleted. /// /// Parameters: /// - alias: The alias of the image to delete. @@ -4437,7 +4437,7 @@ public struct SageMaker: AWSService { return try await self.deleteModel(input, logger: logger) } - /// Deletes an Amazon SageMaker model bias job definition. + /// Deletes an Amazon SageMaker AI model bias job definition. @Sendable @inlinable public func deleteModelBiasJobDefinition(_ input: DeleteModelBiasJobDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -4450,7 +4450,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes an Amazon SageMaker model bias job definition. + /// Deletes an Amazon SageMaker AI model bias job definition. /// /// Parameters: /// - jobDefinitionName: The name of the model bias job definition to delete. @@ -4495,7 +4495,7 @@ public struct SageMaker: AWSService { return try await self.deleteModelCard(input, logger: logger) } - /// Deletes an Amazon SageMaker model explainability job definition. + /// Deletes an Amazon SageMaker AI model explainability job definition. @Sendable @inlinable public func deleteModelExplainabilityJobDefinition(_ input: DeleteModelExplainabilityJobDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -4508,7 +4508,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes an Amazon SageMaker model explainability job definition. + /// Deletes an Amazon SageMaker AI model explainability job definition. 
/// /// Parameters: /// - jobDefinitionName: The name of the model explainability job definition to delete. @@ -4669,7 +4669,7 @@ public struct SageMaker: AWSService { return try await self.deleteMonitoringSchedule(input, logger: logger) } - /// Deletes an SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API. When you delete a notebook instance, you lose all of your data. SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance. + /// Deletes a SageMaker AI notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API. When you delete a notebook instance, you lose all of your data. SageMaker AI removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance. @Sendable @inlinable public func deleteNotebookInstance(_ input: DeleteNotebookInstanceInput, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -4682,10 +4682,10 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes an SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API. When you delete a notebook instance, you lose all of your data. SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance. + /// Deletes a SageMaker AI notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance API. When you delete a notebook instance, you lose all of your data. SageMaker AI removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance. /// /// Parameters: - /// - notebookInstanceName: The name of the SageMaker notebook instance to delete.
+ /// - notebookInstanceName: The name of the SageMaker AI notebook instance to delete. /// - logger: Logger use during operation @inlinable public func deleteNotebookInstance( @@ -4881,7 +4881,7 @@ public struct SageMaker: AWSService { return try await self.deleteSpace(input, logger: logger) } - /// Deletes the Amazon SageMaker Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles. + /// Deletes the Amazon SageMaker AI Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles. @Sendable @inlinable public func deleteStudioLifecycleConfig(_ input: DeleteStudioLifecycleConfigRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -4894,10 +4894,10 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Deletes the Amazon SageMaker Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles. + /// Deletes the Amazon SageMaker AI Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles. /// /// Parameters: - /// - studioLifecycleConfigName: The name of the Amazon SageMaker Studio Lifecycle Configuration to delete. + /// - studioLifecycleConfigName: The name of the Amazon SageMaker AI Studio Lifecycle Configuration to delete. 
/// - logger: Logger use during operation @inlinable public func deleteStudioLifecycleConfig( @@ -6040,7 +6040,7 @@ public struct SageMaker: AWSService { return try await self.describeHyperParameterTuningJob(input, logger: logger) } - /// Describes a SageMaker image. + /// Describes a SageMaker AI image. @Sendable @inlinable public func describeImage(_ input: DescribeImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeImageResponse { @@ -6053,7 +6053,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Describes a SageMaker image. + /// Describes a SageMaker AI image. /// /// Parameters: /// - imageName: The name of the image to describe. @@ -6069,7 +6069,7 @@ public struct SageMaker: AWSService { return try await self.describeImage(input, logger: logger) } - /// Describes a version of a SageMaker image. + /// Describes a version of a SageMaker AI image. @Sendable @inlinable public func describeImageVersion(_ input: DescribeImageVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeImageVersionResponse { @@ -6082,7 +6082,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Describes a version of a SageMaker image. + /// Describes a version of a SageMaker AI image. /// /// Parameters: /// - alias: The alias of the image version. @@ -6835,7 +6835,7 @@ public struct SageMaker: AWSService { return try await self.describeSpace(input, logger: logger) } - /// Describes the Amazon SageMaker Studio Lifecycle Configuration. + /// Describes the Amazon SageMaker AI Studio Lifecycle Configuration. @Sendable @inlinable public func describeStudioLifecycleConfig(_ input: DescribeStudioLifecycleConfigRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeStudioLifecycleConfigResponse { @@ -6848,10 +6848,10 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Describes the Amazon SageMaker Studio Lifecycle Configuration. 
+ /// Describes the Amazon SageMaker AI Studio Lifecycle Configuration. /// /// Parameters: - /// - studioLifecycleConfigName: The name of the Amazon SageMaker Studio Lifecycle Configuration to describe. + /// - studioLifecycleConfigName: The name of the Amazon SageMaker AI Studio Lifecycle Configuration to describe. /// - logger: Logger use during operation @inlinable public func describeStudioLifecycleConfig( @@ -10370,7 +10370,7 @@ public struct SageMaker: AWSService { return try await self.listNotebookInstanceLifecycleConfigs(input, logger: logger) } - /// Returns a list of the SageMaker notebook instances in the requester's account in an Amazon Web Services Region. + /// Returns a list of the SageMaker AI notebook instances in the requester's account in an Amazon Web Services Region. @Sendable @inlinable public func listNotebookInstances(_ input: ListNotebookInstancesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListNotebookInstancesOutput { @@ -10383,7 +10383,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Returns a list of the SageMaker notebook instances in the requester's account in an Amazon Web Services Region. + /// Returns a list of the SageMaker AI notebook instances in the requester's account in an Amazon Web Services Region. /// /// Parameters: /// - additionalCodeRepositoryEquals: A filter that returns only notebook instances with associated with the specified git repository. @@ -10928,7 +10928,7 @@ public struct SageMaker: AWSService { return try await self.listStageDevices(input, logger: logger) } - /// Lists the Amazon SageMaker Studio Lifecycle Configurations in your Amazon Web Services Account. + /// Lists the Amazon SageMaker AI Studio Lifecycle Configurations in your Amazon Web Services Account. 
@Sendable @inlinable public func listStudioLifecycleConfigs(_ input: ListStudioLifecycleConfigsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListStudioLifecycleConfigsResponse { @@ -10941,7 +10941,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Lists the Amazon SageMaker Studio Lifecycle Configurations in your Amazon Web Services Account. + /// Lists the Amazon SageMaker AI Studio Lifecycle Configurations in your Amazon Web Services Account. /// /// Parameters: /// - appTypeEquals: A parameter to search for the App Type to which the Lifecycle Configuration is attached. @@ -11962,7 +11962,7 @@ public struct SageMaker: AWSService { return try await self.startMonitoringSchedule(input, logger: logger) } - /// Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, SageMaker sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook. + /// Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, SageMaker AI sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook. @Sendable @inlinable public func startNotebookInstance(_ input: StartNotebookInstanceInput, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -11975,7 +11975,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, SageMaker sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook. 
+ /// Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume. After configuring the notebook instance, SageMaker AI sets the notebook instance status to InService. A notebook instance's status must be InService before you can connect to your Jupyter notebook. /// /// Parameters: /// - notebookInstanceName: The name of the notebook instance to start. @@ -12067,7 +12067,7 @@ public struct SageMaker: AWSService { return try await self.stopAutoMLJob(input, logger: logger) } - /// Stops a model compilation job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal. When it receives a StopCompilationJob request, Amazon SageMaker changes the CompilationJobStatus of the job to Stopping. After Amazon SageMaker stops the job, it sets the CompilationJobStatus to Stopped. + /// Stops a model compilation job. To stop a job, Amazon SageMaker AI sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal. When it receives a StopCompilationJob request, Amazon SageMaker AI changes the CompilationJobStatus of the job to Stopping. After Amazon SageMaker AI stops the job, it sets the CompilationJobStatus to Stopped. @Sendable @inlinable public func stopCompilationJob(_ input: StopCompilationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -12080,7 +12080,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Stops a model compilation job. To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal. When it receives a StopCompilationJob request, Amazon SageMaker changes the CompilationJobStatus of the job to Stopping. After Amazon SageMaker stops the job, it sets the CompilationJobStatus to Stopped.
+ /// Stops a model compilation job. To stop a job, Amazon SageMaker AI sends the algorithm the SIGTERM signal. This gracefully shuts the job down. If the job hasn't stopped, it sends the SIGKILL signal. When it receives a StopCompilationJob request, Amazon SageMaker AI changes the CompilationJobStatus of the job to Stopping. After Amazon SageMaker AI stops the job, it sets the CompilationJobStatus to Stopped. /// /// Parameters: /// - compilationJobName: The name of the model compilation job to stop. @@ -12343,7 +12343,7 @@ public struct SageMaker: AWSService { return try await self.stopMonitoringSchedule(input, logger: logger) } - /// Terminates the ML compute instance. Before terminating the instance, SageMaker disconnects the ML storage volume from it. SageMaker preserves the ML storage volume. SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance. To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work. + /// Terminates the ML compute instance. Before terminating the instance, SageMaker AI disconnects the ML storage volume from it. SageMaker AI preserves the ML storage volume. SageMaker AI stops charging you for the ML compute instance when you call StopNotebookInstance. To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work. @Sendable @inlinable public func stopNotebookInstance(_ input: StopNotebookInstanceInput, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -12356,7 +12356,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Terminates the ML compute instance.
Before terminating the instance, SageMaker disconnects the ML storage volume from it. SageMaker preserves the ML storage volume. SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance. To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work. + /// Terminates the ML compute instance. Before terminating the instance, SageMaker AI disconnects the ML storage volume from it. SageMaker AI preserves the ML storage volume. SageMaker AI stops charging you for the ML compute instance when you call StopNotebookInstance. To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance API. StartNotebookInstance launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work. /// /// Parameters: /// - notebookInstanceName: The name of the notebook instance to terminate. @@ -12942,7 +12942,7 @@ public struct SageMaker: AWSService { /// Updates the default settings for new user profiles in the domain. /// /// Parameters: - /// - appNetworkAccessType: Specifies the VPC used for non-EFS traffic. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access. VpcOnly - All Studio traffic is through the specified VPC and subnets. This configuration can only be modified if there are no apps in the InService, Pending, or Deleting state. The configuration cannot be updated if DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided as part of the same request. + /// - appNetworkAccessType: Specifies the VPC used for non-EFS traffic. 
PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access. VpcOnly - All Studio traffic is through the specified VPC and subnets. This configuration can only be modified if there are no apps in the InService, Pending, or Deleting state. The configuration cannot be updated if DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided as part of the same request. /// - appSecurityGroupManagement: The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. If setting up the domain for use with RStudio, this value must be set to Service. /// - defaultSpaceSettings: The default settings for shared spaces that users create in the domain. /// - defaultUserSettings: A collection of settings. @@ -13204,7 +13204,7 @@ public struct SageMaker: AWSService { return try await self.updateHub(input, logger: logger) } - /// Updates the properties of a SageMaker image. To change the image's tags, use the AddTags and DeleteTags APIs. + /// Updates the properties of a SageMaker AI image. To change the image's tags, use the AddTags and DeleteTags APIs. @Sendable @inlinable public func updateImage(_ input: UpdateImageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateImageResponse { @@ -13217,14 +13217,14 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Updates the properties of a SageMaker image. To change the image's tags, use the AddTags and DeleteTags APIs. + /// Updates the properties of a SageMaker AI image. To change the image's tags, use the AddTags and DeleteTags APIs. /// /// Parameters: /// - deleteProperties: A list of properties to delete. 
Only the Description and DisplayName properties can be deleted. /// - description: The new description for the image. /// - displayName: The new display name for the image. /// - imageName: The name of the image to update. - /// - roleArn: The new ARN for the IAM role that enables Amazon SageMaker to perform tasks on your behalf. + /// - roleArn: The new ARN for the IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. /// - logger: Logger use during operation @inlinable public func updateImage( @@ -13245,7 +13245,7 @@ public struct SageMaker: AWSService { return try await self.updateImage(input, logger: logger) } - /// Updates the properties of a SageMaker image version. + /// Updates the properties of a SageMaker AI image version. @Sendable @inlinable public func updateImageVersion(_ input: UpdateImageVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateImageVersionResponse { @@ -13258,7 +13258,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Updates the properties of a SageMaker image version. + /// Updates the properties of a SageMaker AI image version. /// /// Parameters: /// - alias: The alias of the image version. @@ -13266,7 +13266,7 @@ public struct SageMaker: AWSService { /// - aliasesToDelete: A list of aliases to delete. /// - horovod: Indicates Horovod compatibility. /// - imageName: The name of the image. - /// - jobType: Indicates SageMaker job type compatibility. TRAINING: The image version is compatible with SageMaker training jobs. INFERENCE: The image version is compatible with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels. + /// - jobType: Indicates SageMaker AI job type compatibility. TRAINING: The image version is compatible with SageMaker AI training jobs. INFERENCE: The image version is compatible with SageMaker AI inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels. 
/// - mlFramework: The machine learning framework vended in the image version. /// - processor: Indicates CPU or GPU compatibility. CPU: The image version is compatible with CPU. GPU: The image version is compatible with GPU. /// - programmingLang: The supported programming language and its version. @@ -13640,8 +13640,8 @@ public struct SageMaker: AWSService { /// /// Parameters: /// - acceleratorTypes: This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types to associate with this notebook instance. - /// - additionalCodeRepositories: An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. - /// - defaultCodeRepository: The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// - additionalCodeRepositories: An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. 
For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. + /// - defaultCodeRepository: The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. /// - disassociateAcceleratorTypes: This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types to remove from this notebook instance. /// - disassociateAdditionalCodeRepositories: A list of names or URLs of the default Git repositories to remove from this notebook instance. This operation is idempotent. If you specify a Git repository that is not associated with the notebook instance when you call this method, it does not throw an error. /// - disassociateDefaultCodeRepository: The name or URL of the default Git repository to remove from this notebook instance. This operation is idempotent. If you specify a Git repository that is not associated with the notebook instance when you call this method, it does not throw an error. @@ -13650,9 +13650,9 @@ public struct SageMaker: AWSService { /// - instanceType: The Amazon ML compute instance type. /// - lifecycleConfigName: The name of a lifecycle configuration to associate with the notebook instance. For information about lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance. /// - notebookInstanceName: The name of the notebook instance to update. - /// - roleArn: The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access the notebook instance. For more information, see SageMaker Roles. 
To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. + /// - roleArn: The Amazon Resource Name (ARN) of the IAM role that SageMaker AI can assume to access the notebook instance. For more information, see SageMaker AI Roles. To be able to pass this role to SageMaker AI, the caller of this API must have the iam:PassRole permission. /// - rootAccess: Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled. If you set this to Disabled, users don't have root access on the notebook instance, but lifecycle configuration scripts still run with root permissions. - /// - volumeSizeInGB: The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. ML storage volumes are encrypted, so SageMaker can't determine the amount of available free space on the volume. Because of this, you can increase the volume size when you update a notebook instance, but you can't decrease the volume size. If you want to decrease the size of the ML storage volume in use, create a new notebook instance with the desired size. + /// - volumeSizeInGB: The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. ML storage volumes are encrypted, so SageMaker AI can't determine the amount of available free space on the volume. Because of this, you can increase the volume size when you update a notebook instance, but you can't decrease the volume size. If you want to decrease the size of the ML storage volume in use, create a new notebook instance with the desired size. /// - logger: Logger use during operation @inlinable public func updateNotebookInstance( @@ -13896,7 +13896,7 @@ public struct SageMaker: AWSService { return try await self.updateProject(input, logger: logger) } - /// Updates the settings of a space. + /// Updates the settings of a space. You can't edit the app type of a space in the SpaceSettings. 
@Sendable @inlinable public func updateSpace(_ input: UpdateSpaceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateSpaceResponse { @@ -13909,7 +13909,7 @@ public struct SageMaker: AWSService { logger: logger ) } - /// Updates the settings of a space. + /// Updates the settings of a space. You can't edit the app type of a space in the SpaceSettings. /// /// Parameters: /// - domainId: The ID of the associated domain. diff --git a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift index 9071012b96..489be5da54 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift @@ -671,6 +671,15 @@ extension SageMaker { case mlC5N9Xlarge = "ml.c5n.9xlarge" case mlC5NLarge = "ml.c5n.large" case mlC5Xlarge = "ml.c5.xlarge" + case mlC6I12Xlarge = "ml.c6i.12xlarge" + case mlC6I16Xlarge = "ml.c6i.16xlarge" + case mlC6I24Xlarge = "ml.c6i.24xlarge" + case mlC6I2Xlarge = "ml.c6i.2xlarge" + case mlC6I32Xlarge = "ml.c6i.32xlarge" + case mlC6I4Xlarge = "ml.c6i.4xlarge" + case mlC6I8Xlarge = "ml.c6i.8xlarge" + case mlC6ILarge = "ml.c6i.large" + case mlC6IXlarge = "ml.c6i.xlarge" case mlG512Xlarge = "ml.g5.12xlarge" case mlG516Xlarge = "ml.g5.16xlarge" case mlG524Xlarge = "ml.g5.24xlarge" @@ -705,11 +714,29 @@ extension SageMaker { case mlM58Xlarge = "ml.m5.8xlarge" case mlM5Large = "ml.m5.large" case mlM5Xlarge = "ml.m5.xlarge" + case mlM6I12Xlarge = "ml.m6i.12xlarge" + case mlM6I16Xlarge = "ml.m6i.16xlarge" + case mlM6I24Xlarge = "ml.m6i.24xlarge" + case mlM6I2Xlarge = "ml.m6i.2xlarge" + case mlM6I32Xlarge = "ml.m6i.32xlarge" + case mlM6I4Xlarge = "ml.m6i.4xlarge" + case mlM6I8Xlarge = "ml.m6i.8xlarge" + case mlM6ILarge = "ml.m6i.large" + case mlM6IXlarge = "ml.m6i.xlarge" case mlP4D24Xlarge = "ml.p4d.24xlarge" case mlP4De24Xlarge = "ml.p4de.24xlarge" case mlP548Xlarge = "ml.p5.48xlarge" case mlP5E48Xlarge = "ml.p5e.48xlarge" case 
mlP5En48Xlarge = "ml.p5en.48xlarge" + case mlR6I12Xlarge = "ml.r6i.12xlarge" + case mlR6I16Xlarge = "ml.r6i.16xlarge" + case mlR6I24Xlarge = "ml.r6i.24xlarge" + case mlR6I2Xlarge = "ml.r6i.2xlarge" + case mlR6I32Xlarge = "ml.r6i.32xlarge" + case mlR6I4Xlarge = "ml.r6i.4xlarge" + case mlR6I8Xlarge = "ml.r6i.8xlarge" + case mlR6ILarge = "ml.r6i.large" + case mlR6IXlarge = "ml.r6i.xlarge" case mlT32Xlarge = "ml.t3.2xlarge" case mlT3Large = "ml.t3.large" case mlT3Medium = "ml.t3.medium" @@ -3196,6 +3223,8 @@ extension SageMaker { case mlR7I8Xlarge = "ml.r7i.8xlarge" case mlR7ILarge = "ml.r7i.large" case mlR7IXlarge = "ml.r7i.xlarge" + case mlTrn12Xlarge = "ml.trn1.2xlarge" + case mlTrn132Xlarge = "ml.trn1.32xlarge" public var description: String { return self.rawValue } } @@ -4232,7 +4261,7 @@ extension SageMaker { public let creationTime: Date? /// The configuration for the file system and the runtime, such as the environment variables and entry point. public let jupyterLabAppImageConfig: JupyterLabAppImageConfig? - /// The configuration for the file system and kernels in the SageMaker image. + /// The configuration for the file system and kernels in the SageMaker AI image. public let kernelGatewayImageConfig: KernelGatewayImageConfig? /// When the AppImageConfig was last modified. public let lastModifiedTime: Date? @@ -5147,7 +5176,7 @@ extension SageMaker { } public struct AutoMLS3DataSource: AWSEncodableShape & AWSDecodableShape { - /// The data type. If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training. The S3Prefix should have the following format: s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training. 
A ManifestFile should have the format shown below: [ {"prefix": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/"}, "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1", "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2", ... "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N" ] If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile is available for V2 API jobs only (for example, for jobs created by calling CreateAutoMLJobV2). Here is a minimal, single-record example of an AugmentedManifestFile: {"source-ref": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/cats/cat.jpg", "label-metadata": {"class-name": "cat" } For more information on AugmentedManifestFile, see Provide Dataset Metadata to Training Jobs with an Augmented Manifest File. + /// The data type. If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker AI uses all objects that match the specified key name prefix for model training. The S3Prefix should have the following format: s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker AI to use for model training. A ManifestFile should have the format shown below: [ {"prefix": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/"}, "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1", "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2", ... "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N" ] If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile is available for V2 API jobs only (for example, for jobs created by calling CreateAutoMLJobV2). 
Here is a minimal, single-record example of an AugmentedManifestFile: {"source-ref": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/cats/cat.jpg", "label-metadata": {"class-name": "cat" } For more information on AugmentedManifestFile, see Provide Dataset Metadata to Training Jobs with an Augmented Manifest File. public let s3DataType: AutoMLS3DataType? /// The URL to the Amazon S3 data source. The Uri refers to the Amazon S3 prefix or ManifestFile depending on the data type. public let s3Uri: String? @@ -5778,9 +5807,9 @@ extension SageMaker { } public struct CaptureContentTypeHeader: AWSEncodableShape & AWSDecodableShape { - /// The list of all content type headers that Amazon SageMaker will treat as CSV and capture accordingly. + /// The list of all content type headers that Amazon SageMaker AI will treat as CSV and capture accordingly. public let csvContentTypes: [String]? - /// The list of all content type headers that SageMaker will treat as JSON and capture accordingly. + /// The list of all content type headers that SageMaker AI will treat as JSON and capture accordingly. public let jsonContentTypes: [String]? @inlinable @@ -6390,6 +6419,7 @@ extension SageMaker { try self.validate(self.executionRole, name: "executionRole", parent: name, max: 2048) try self.validate(self.executionRole, name: "executionRole", parent: name, min: 20) try self.validate(self.executionRole, name: "executionRole", parent: name, pattern: "^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + try self.validate(self.instanceCount, name: "instanceCount", parent: name, max: 6758) try self.validate(self.instanceCount, name: "instanceCount", parent: name, min: 0) try self.validate(self.instanceGroupName, name: "instanceGroupName", parent: name, max: 63) try self.validate(self.instanceGroupName, name: "instanceGroupName", parent: name, min: 1) @@ -7555,7 +7585,7 @@ extension SageMaker { public let appType: AppType? /// The domain ID. public let domainId: String? 
- /// The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error. + /// The instance type and the Amazon Resource Name (ARN) of the SageMaker AI image created on the instance. The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error. public let resourceSpec: ResourceSpec? /// The name of the space. If this value is not set, then UserProfileName must be set. public let spaceName: String? @@ -8043,9 +8073,9 @@ extension SageMaker { public let modelPackageVersionArn: String? /// Provides information about the output location for the compiled model and the target device the model runs on. public let outputConfig: OutputConfig? - /// The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. During model compilation, Amazon SageMaker needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles. + /// The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. 
During model compilation, Amazon SageMaker AI needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker AI Roles. public let roleArn: String? - /// Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs. + /// Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker AI ends the compilation job. Use this API to cap model training costs. public let stoppingCondition: StoppingCondition? /// An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources. public let tags: [Tag]? @@ -8097,7 +8127,7 @@ extension SageMaker { } public struct CreateCompilationJobResponse: AWSDecodableShape { - /// If the action is successful, the service sends back an HTTP 200 response. Amazon SageMaker returns the following data in JSON format: CompilationJobArn: The Amazon Resource Name (ARN) of the compiled job. + /// If the action is successful, the service sends back an HTTP 200 response. Amazon SageMaker AI returns the following data in JSON format: CompilationJobArn: The Amazon Resource Name (ARN) of the compiled job. public let compilationJobArn: String? @inlinable @@ -8264,7 +8294,7 @@ extension SageMaker { public let jobResources: MonitoringResources? /// Specifies networking configuration for the monitoring job. public let networkConfig: MonitoringNetworkConfig? 
- /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. @@ -8384,7 +8414,7 @@ extension SageMaker { } public struct CreateDomainRequest: AWSEncodableShape { - /// Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets + /// Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets public let appNetworkAccessType: AppNetworkAccessType? /// The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. If setting up the domain for use with RStudio, this value must be set to Service. public let appSecurityGroupManagement: AppSecurityGroupManagement? @@ -8400,7 +8430,7 @@ extension SageMaker { public let domainSettings: DomainSettings? /// Use KmsKeyId. public let homeEfsFileSystemKmsKeyId: String? - /// SageMaker uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to the domain with an Amazon Web Services managed key by default. For more control, specify a customer managed key. 
+ /// SageMaker AI uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to the domain with an Amazon Web Services managed key by default. For more control, specify a customer managed key. public let kmsKeyId: String? /// The VPC subnets that the domain uses for communication. public let subnetIds: [String]? @@ -8670,7 +8700,7 @@ extension SageMaker { public let enableNetworkIsolation: Bool? /// The name of the endpoint configuration. You specify this name in a CreateEndpoint request. public let endpointConfigName: String? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform actions on your behalf. For more information, see SageMaker Roles. To be able to pass this role to Amazon SageMaker, the caller of this action must have the iam:PassRole permission. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform actions on your behalf. For more information, see SageMaker AI Roles. To be able to pass this role to Amazon SageMaker AI, the caller of this action must have the iam:PassRole permission. public let executionRoleArn: String? /// A member of CreateEndpointConfig that enables explainers. public let explainerConfig: ExplainerConfig? @@ -9274,7 +9304,7 @@ extension SageMaker { public let displayName: String? /// The name of the image. Must be unique to your account. public let imageName: String? - /// The ARN of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. + /// The ARN of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. public let roleArn: String? /// A list of tags to apply to the image. public let tags: [Tag]? @@ -9341,7 +9371,7 @@ extension SageMaker { public let horovod: Bool? /// The ImageName of the Image to create a version of. public let imageName: String? - /// Indicates SageMaker job type compatibility. TRAINING: The image version is compatible with SageMaker training jobs. 
INFERENCE: The image version is compatible with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels. + /// Indicates SageMaker AI job type compatibility. TRAINING: The image version is compatible with SageMaker AI training jobs. INFERENCE: The image version is compatible with SageMaker AI inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels. public let jobType: JobType? /// The machine learning framework vended in the image version. public let mlFramework: String? @@ -9829,7 +9859,7 @@ extension SageMaker { public let modelBiasJobOutputConfig: MonitoringOutputConfig? /// Networking options for a model bias job. public let networkConfig: MonitoringNetworkConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. @@ -10017,7 +10047,7 @@ extension SageMaker { public let modelExplainabilityJobOutputConfig: MonitoringOutputConfig? /// Networking options for a model explainability job. public let networkConfig: MonitoringNetworkConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. 
@@ -10383,7 +10413,7 @@ extension SageMaker { public let modelQualityJobOutputConfig: MonitoringOutputConfig? /// Specifies the network configuration for the monitoring job. public let networkConfig: MonitoringNetworkConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. @@ -10501,17 +10531,17 @@ extension SageMaker { public struct CreateNotebookInstanceInput: AWSEncodableShape { /// This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of EI instance types to associate with this notebook instance. public let acceleratorTypes: [NotebookInstanceAcceleratorType]? - /// An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. 
For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let additionalCodeRepositories: [String]? - /// A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let defaultCodeRepository: String? - /// Sets whether SageMaker provides internet access to the notebook instance. If you set this to Disabled this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC. For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter. + /// Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to Disabled this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker AI training and endpoint services unless you configure a NAT Gateway in your VPC. For more information, see Notebook Instances Are Internet-Enabled by Default. 
You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter. public let directInternetAccess: DirectInternetAccess? /// Information on the IMDS configuration of the notebook instance public let instanceMetadataServiceConfiguration: InstanceMetadataServiceConfiguration? /// The type of ML compute instance to launch for the notebook instance. public let instanceType: InstanceType? - /// The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide. + /// The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the Amazon Web Services Key Management Service Developer Guide. public let kmsKeyId: String? /// The name of a lifecycle configuration to associate with the notebook instance. For information about lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance. public let lifecycleConfigName: String? @@ -10519,7 +10549,7 @@ extension SageMaker { public let notebookInstanceName: String? /// The platform identifier of the notebook instance runtime environment. public let platformIdentifier: String? - /// When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker Roles. 
To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. + /// When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker AI assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker AI can perform these tasks. The policy must allow the SageMaker AI service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker AI Roles. To be able to pass this role to SageMaker AI, the caller of this API must have the iam:PassRole permission. public let roleArn: String? /// Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled. Lifecycle configurations need root access to be able to set up a notebook instance. Because of this, lifecycle configurations associated with a notebook instance always run with root access even if you disable root access for users. public let rootAccess: RootAccess? @@ -10685,7 +10715,7 @@ extension SageMaker { public let optimizationJobName: String? /// Details for where to store the optimized model that you create with the optimization job. public let outputConfig: OptimizationJobOutputConfig? - /// The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf. During model optimization, Amazon SageMaker needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker Roles. + /// The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. 
During model optimization, Amazon SageMaker AI needs your permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker AI, the caller of this API must have the iam:PassRole permission. For more information, see Amazon SageMaker AI Roles. public let roleArn: String? public let stoppingCondition: StoppingCondition? /// A list of key-value pairs associated with the optimization job. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide. @@ -11348,9 +11378,9 @@ extension SageMaker { public struct CreateStudioLifecycleConfigRequest: AWSEncodableShape { /// The App type that the Lifecycle Configuration is attached to. public let studioLifecycleConfigAppType: StudioLifecycleConfigAppType? - /// The content of your Amazon SageMaker Studio Lifecycle Configuration script. This content must be base64 encoded. + /// The content of your Amazon SageMaker AI Studio Lifecycle Configuration script. This content must be base64 encoded. public let studioLifecycleConfigContent: String? - /// The name of the Amazon SageMaker Studio Lifecycle Configuration to create. + /// The name of the Amazon SageMaker AI Studio Lifecycle Configuration to create. public let studioLifecycleConfigName: String? /// Tags to be associated with the Lifecycle Configuration. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API. public let tags: [Tag]? @@ -12156,7 +12186,7 @@ extension SageMaker { } public struct DataCaptureConfig: AWSEncodableShape & AWSDecodableShape { - /// Configuration specifying how to treat different headers. If no headers are specified SageMaker will by default base64 encode when capturing the data. 
+ /// Configuration specifying how to treat different headers. If no headers are specified SageMaker AI will by default base64 encode when capturing the data. public let captureContentTypeHeader: CaptureContentTypeHeader? /// Specifies data Model Monitor will capture. You can configure whether to collect only input, only output, or both public let captureOptions: [CaptureOption]? @@ -12164,9 +12194,9 @@ extension SageMaker { public let destinationS3Uri: String? /// Whether data capture should be enabled or disabled (defaults to enabled). public let enableCapture: Bool? - /// The percentage of requests SageMaker will capture. A lower value is recommended for Endpoints with high traffic. + /// The percentage of requests SageMaker AI will capture. A lower value is recommended for Endpoints with high traffic. public let initialSamplingPercentage: Int? - /// The Amazon Resource Name (ARN) of an Key Management Service key that SageMaker uses to encrypt the captured data at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: alias/ExampleAlias Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias + /// The Amazon Resource Name (ARN) of a Key Management Service key that SageMaker AI uses to encrypt the captured data at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: alias/ExampleAlias Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias public let kmsKeyId: String? @inlinable @@ -12622,7 +12652,7 @@ extension SageMaker { public struct DefaultSpaceSettings: AWSEncodableShape & AWSDecodableShape { - /// The settings for assigning a custom file system to a domain. 
Permitted users can access this file system in Amazon SageMaker Studio. + /// The settings for assigning a custom file system to a domain. Permitted users can access this file system in Amazon SageMaker AI Studio. public let customFileSystemConfigs: [CustomFileSystemConfig]? public let customPosixUserConfig: CustomPosixUserConfig? /// The ARN of the execution role for the space. @@ -13713,7 +13743,7 @@ extension SageMaker { } public struct DeleteNotebookInstanceInput: AWSEncodableShape { - /// The name of the SageMaker notebook instance to delete. + /// The name of the SageMaker AI notebook instance to delete. public let notebookInstanceName: String? @inlinable @@ -13897,7 +13927,7 @@ extension SageMaker { } public struct DeleteStudioLifecycleConfigRequest: AWSEncodableShape { - /// The name of the Amazon SageMaker Studio Lifecycle Configuration to delete. + /// The name of the Amazon SageMaker AI Studio Lifecycle Configuration to delete. public let studioLifecycleConfigName: String? @inlinable @@ -14526,7 +14556,7 @@ extension SageMaker { public let appType: AppType? /// The lifecycle configuration that runs before the default lifecycle configuration public let builtInLifecycleConfigArn: String? - /// The creation time of the application. After an application has been shut down for 24 hours, SageMaker deletes all metadata for the application. To be considered an update and retain application metadata, applications must be restarted within 24 hours after the previous application has been shut down. After this time window, creation of an application is considered a new application rather than an update of the previous application. + /// The creation time of the application. After an application has been shut down for 24 hours, SageMaker AI deletes all metadata for the application. To be considered an update and retain application metadata, applications must be restarted within 24 hours after the previous application has been shut down. 
After this time window, creation of an application is considered a new application rather than an update of the previous application. public let creationTime: Date? /// The domain ID. public let domainId: String? @@ -14534,9 +14564,9 @@ extension SageMaker { public let failureReason: String? /// The timestamp of the last health check. public let lastHealthCheckTimestamp: Date? - /// The timestamp of the last user's activity. LastUserActivityTimestamp is also updated when SageMaker performs health checks without user activity. As a result, this value is set to the same value as LastHealthCheckTimestamp. + /// The timestamp of the last user's activity. LastUserActivityTimestamp is also updated when SageMaker AI performs health checks without user activity. As a result, this value is set to the same value as LastHealthCheckTimestamp. public let lastUserActivityTimestamp: Date? - /// The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. + /// The instance type and the Amazon Resource Name (ARN) of the SageMaker AI image created on the instance. public let resourceSpec: ResourceSpec? /// The name of the space. If this value is not set, then UserProfileName must be set. public let spaceName: String? @@ -14684,7 +14714,7 @@ extension SageMaker { public let autoMLJobSecondaryStatus: AutoMLJobSecondaryStatus? /// Returns the status of the AutoML job. public let autoMLJobStatus: AutoMLJobStatus? - /// The best model candidate selected by SageMaker Autopilot using both the best objective metric and lowest InferenceLatency for an experiment. + /// The best model candidate selected by SageMaker AI Autopilot using both the best objective metric and lowest InferenceLatency for an experiment. public let bestCandidate: AutoMLCandidate? /// Returns the creation time of the AutoML job. public let creationTime: Date? 
@@ -15142,7 +15172,7 @@ extension SageMaker { } public struct DescribeCompilationJobResponse: AWSDecodableShape { - /// The time when the model compilation job on a compilation job instance ended. For a successful or stopped job, this is when the job's model artifacts have finished uploading. For a failed job, this is when Amazon SageMaker detected that the job failed. + /// The time when the model compilation job on a compilation job instance ended. For a successful or stopped job, this is when the job's model artifacts have finished uploading. For a failed job, this is when Amazon SageMaker AI detected that the job failed. public let compilationEndTime: Date? /// The Amazon Resource Name (ARN) of the model compilation job. public let compilationJobArn: String? @@ -15172,9 +15202,9 @@ extension SageMaker { public let modelPackageVersionArn: String? /// Information about the output location for the compiled model and the target device that the model runs on. public let outputConfig: OutputConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker assumes to perform the model compilation job. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI assumes to perform the model compilation job. public let roleArn: String? - /// Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs. + /// Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker AI ends the compilation job. Use this API to cap model training costs. public let stoppingCondition: StoppingCondition? /// A VpcConfig object that specifies the VPC that you want your compilation job to connect to. Control access to your models by configuring the VPC. For more information, see Protect Compilation Jobs by Using an Amazon Virtual Private Cloud. public let vpcConfig: NeoVpcConfig? 
@@ -15422,7 +15452,7 @@ extension SageMaker { public let jobResources: MonitoringResources? /// The networking configuration for the data quality monitoring job. public let networkConfig: MonitoringNetworkConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? @@ -15625,7 +15655,7 @@ extension SageMaker { } public struct DescribeDomainResponse: AWSDecodableShape { - /// Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets + /// Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access VpcOnly - All traffic is through the specified VPC and subnets public let appNetworkAccessType: AppNetworkAccessType? /// The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. public let appSecurityGroupManagement: AppSecurityGroupManagement? @@ -15657,7 +15687,7 @@ extension SageMaker { public let lastModifiedTime: Date? /// The ID of the security group that authorizes traffic between the RSessionGateway apps and the RStudioServerPro app. public let securityGroupIdForDomainBoundary: String? - /// The ARN of the application managed by SageMaker in IAM Identity Center. This value is only returned for domains created after October 1, 2023. 
+ /// The ARN of the application managed by SageMaker AI in IAM Identity Center. This value is only returned for domains created after October 1, 2023. public let singleSignOnApplicationArn: String? /// The IAM Identity Center managed application instance ID. public let singleSignOnManagedApplicationInstanceId: String? @@ -16776,7 +16806,7 @@ extension SageMaker { public let imageStatus: ImageStatus? /// When the image was last modified. public let lastModifiedTime: Date? - /// The ARN of the IAM role that enables Amazon SageMaker to perform tasks on your behalf. + /// The ARN of the IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. public let roleArn: String? @inlinable @@ -16854,7 +16884,7 @@ extension SageMaker { public let imageVersionArn: String? /// The status of the version. public let imageVersionStatus: ImageVersionStatus? - /// Indicates SageMaker job type compatibility. TRAINING: The image version is compatible with SageMaker training jobs. INFERENCE: The image version is compatible with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels. + /// Indicates SageMaker AI job type compatibility. TRAINING: The image version is compatible with SageMaker AI training jobs. INFERENCE: The image version is compatible with SageMaker AI inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels. public let jobType: JobType? /// When the version was last modified. public let lastModifiedTime: Date? @@ -18007,7 +18037,7 @@ extension SageMaker { public let modelQualityJobOutputConfig: MonitoringOutputConfig? /// Networking options for a model quality job. public let networkConfig: MonitoringNetworkConfig? - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. 
public let roleArn: String? public let stoppingCondition: MonitoringStoppingCondition? @@ -18186,13 +18216,13 @@ extension SageMaker { public struct DescribeNotebookInstanceOutput: AWSDecodableShape { /// This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types associated with this notebook instance. public let acceleratorTypes: [NotebookInstanceAcceleratorType]? - /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let additionalCodeRepositories: [String]? /// A timestamp. Use this parameter to return the time when the notebook instance was created public let creationTime: Date? - /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. 
For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let defaultCodeRepository: String? - /// Describes whether SageMaker provides internet access to the notebook instance. If this value is set to Disabled, the notebook instance does not have internet access, and cannot connect to SageMaker training and endpoint services. For more information, see Notebook Instances Are Internet-Enabled by Default. + /// Describes whether SageMaker AI provides internet access to the notebook instance. If this value is set to Disabled, the notebook instance does not have internet access, and cannot connect to SageMaker AI training and endpoint services. For more information, see Notebook Instances Are Internet-Enabled by Default. public let directInternetAccess: DirectInternetAccess? /// If status is Failed, the reason it failed. public let failureReason: String? @@ -18200,17 +18230,17 @@ extension SageMaker { public let instanceMetadataServiceConfiguration: InstanceMetadataServiceConfiguration? /// The type of ML compute instance running on the notebook instance. public let instanceType: InstanceType? - /// The Amazon Web Services KMS key ID SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance. + /// The Amazon Web Services KMS key ID SageMaker AI uses to encrypt data when storing it on the ML storage volume attached to the instance. public let kmsKeyId: String? /// A timestamp. 
Use this parameter to retrieve the time when the notebook instance was last modified. public let lastModifiedTime: Date? - /// The network interface IDs that SageMaker created at the time of creating the instance. + /// The network interface IDs that SageMaker AI created at the time of creating the instance. public let networkInterfaceId: String? /// The Amazon Resource Name (ARN) of the notebook instance. public let notebookInstanceArn: String? /// Returns the name of a notebook instance lifecycle configuration. For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance public let notebookInstanceLifecycleConfigName: String? - /// The name of the SageMaker notebook instance. + /// The name of the SageMaker AI notebook instance. public let notebookInstanceName: String? /// The status of the notebook instance. public let notebookInstanceStatus: NotebookInstanceStatus? @@ -18933,7 +18963,7 @@ extension SageMaker { } public struct DescribeStudioLifecycleConfigRequest: AWSEncodableShape { - /// The name of the Amazon SageMaker Studio Lifecycle Configuration to describe. + /// The name of the Amazon SageMaker AI Studio Lifecycle Configuration to describe. public let studioLifecycleConfigName: String? @inlinable @@ -18952,17 +18982,17 @@ extension SageMaker { } public struct DescribeStudioLifecycleConfigResponse: AWSDecodableShape { - /// The creation time of the Amazon SageMaker Studio Lifecycle Configuration. + /// The creation time of the Amazon SageMaker AI Studio Lifecycle Configuration. public let creationTime: Date? - /// This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle Configurations are immutable. + /// This value is equivalent to CreationTime because Amazon SageMaker AI Studio Lifecycle Configurations are immutable. public let lastModifiedTime: Date? /// The App type that the Lifecycle Configuration is attached to. 
public let studioLifecycleConfigAppType: StudioLifecycleConfigAppType? /// The ARN of the Lifecycle Configuration to describe. public let studioLifecycleConfigArn: String? - /// The content of your Amazon SageMaker Studio Lifecycle Configuration script. + /// The content of your Amazon SageMaker AI Studio Lifecycle Configuration script. public let studioLifecycleConfigContent: String? - /// The name of the Amazon SageMaker Studio Lifecycle Configuration that is described. + /// The name of the Amazon SageMaker AI Studio Lifecycle Configuration that is described. public let studioLifecycleConfigName: String? @inlinable @@ -20120,7 +20150,7 @@ extension SageMaker { public let amazonQSettings: AmazonQSettings? /// A collection of settings that configure the domain's Docker interaction. public let dockerSettings: DockerSettings? - /// The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key. + /// The configuration for attaching a SageMaker AI user profile name to the execution role as a sts:SourceIdentity key. public let executionRoleIdentityConfig: ExecutionRoleIdentityConfig? /// A collection of settings that configure the RStudioServerPro Domain-level app. public let rStudioServerProDomainSettings: RStudioServerProDomainSettings? @@ -20161,7 +20191,7 @@ extension SageMaker { public let amazonQSettings: AmazonQSettings? /// A collection of settings that configure the domain's Docker interaction. public let dockerSettings: DockerSettings? - /// The configuration for attaching a SageMaker user profile name to the execution role as a sts:SourceIdentity key. This configuration can only be modified if there are no apps in the InService or Pending state. + /// The configuration for attaching a SageMaker AI user profile name to the execution role as a sts:SourceIdentity key. This configuration can only be modified if there are no apps in the InService or Pending state. 
public let executionRoleIdentityConfig: ExecutionRoleIdentityConfig? /// A collection of RStudioServerPro Domain-level app settings to update. A single RStudioServerPro application is created for a domain. public let rStudioServerProDomainSettingsForUpdate: RStudioServerProDomainSettingsForUpdate? @@ -20380,7 +20410,7 @@ extension SageMaker { public struct EFSFileSystemConfig: AWSEncodableShape & AWSDecodableShape { /// The ID of your Amazon EFS file system. public let fileSystemId: String? - /// The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below. + /// The path to the file system directory that is accessible in Amazon SageMaker AI Studio. Permitted users can access only this directory and below. public let fileSystemPath: String? @inlinable @@ -23758,7 +23788,7 @@ extension SageMaker { public let computeResourceRequirements: InferenceComponentComputeResourceRequirements? /// Defines a container that provides the runtime environment for a model that you deploy with an inference component. public let container: InferenceComponentContainerSpecification? - /// The name of an existing SageMaker model object in your account that you want to deploy with the inference component. + /// The name of an existing SageMaker AI model object in your account that you want to deploy with the inference component. public let modelName: String? /// Settings that take effect while the model container starts up. public let startupParameters: InferenceComponentStartupParameters? @@ -23798,7 +23828,7 @@ extension SageMaker { public let computeResourceRequirements: InferenceComponentComputeResourceRequirements? /// Details about the container that provides the runtime environment for the model that is deployed with the inference component. public let container: InferenceComponentContainerSpecificationSummary? - /// The name of the SageMaker model object that is deployed with the inference component. 
+ /// The name of the SageMaker AI model object that is deployed with the inference component. public let modelName: String? /// Settings that take effect while the model container starts up. public let startupParameters: InferenceComponentStartupParameters? @@ -24443,9 +24473,9 @@ extension SageMaker { } public struct JupyterServerAppSettings: AWSEncodableShape & AWSDecodableShape { - /// A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. + /// A list of Git repositories that SageMaker AI automatically displays to users for cloning in the JupyterServer application. public let codeRepositories: [CodeRepository]? - /// The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the LifecycleConfigArns parameter, then this parameter is also required. + /// The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the JupyterServer app. If you use the LifecycleConfigArns parameter, then this parameter is also required. public let defaultResourceSpec: ResourceSpec? /// The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the DefaultResourceSpec parameter is also required. To remove a Lifecycle Config, you must set LifecycleConfigArns to an empty list. public let lifecycleConfigArns: [String]? @@ -24491,9 +24521,9 @@ extension SageMaker { } public struct KernelGatewayAppSettings: AWSEncodableShape & AWSDecodableShape { - /// A list of custom SageMaker images that are configured to run as a KernelGateway app. + /// A list of custom SageMaker AI images that are configured to run as a KernelGateway app. public let customImages: [CustomImage]? - /// The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app. 
The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the CLI or CloudFormation and the instance type parameter value is not passed. + /// The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app. The Amazon SageMaker AI Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the CLI or CloudFormation and the instance type parameter value is not passed. public let defaultResourceSpec: ResourceSpec? /// The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the the user profile or domain. To remove a Lifecycle Config, you must set LifecycleConfigArns to an empty list. public let lifecycleConfigArns: [String]? @@ -24525,7 +24555,7 @@ extension SageMaker { } public struct KernelGatewayImageConfig: AWSEncodableShape & AWSDecodableShape { - /// The Amazon Elastic File System storage configuration for a SageMaker image. + /// The Amazon Elastic File System storage configuration for a SageMaker AI image. public let fileSystemConfig: FileSystemConfig? /// The specification of the Jupyter kernels in the image. public let kernelSpecs: [KernelSpec]? @@ -25196,7 +25226,7 @@ extension SageMaker { public struct ListAliasesResponse: AWSDecodableShape { /// A token for getting the next set of aliases, if more aliases exist. public let nextToken: String? - /// A list of SageMaker image version aliases. + /// A list of SageMaker AI image version aliases. public let sageMakerImageVersionAliases: [String]? @inlinable @@ -26009,7 +26039,7 @@ extension SageMaker { public struct ListCompilationJobsResponse: AWSDecodableShape { /// An array of CompilationJobSummary objects, each describing a model compilation job. public let compilationJobSummaries: [CompilationJobSummary]? 
- /// If the response is truncated, Amazon SageMaker returns this NextToken. To retrieve the next set of model compilation jobs, use this token in the next request. + /// If the response is truncated, Amazon SageMaker AI returns this NextToken. To retrieve the next set of model compilation jobs, use this token in the next request. public let nextToken: String? @inlinable @@ -28640,7 +28670,7 @@ extension SageMaker { public struct ListModelQualityJobDefinitionsResponse: AWSDecodableShape { /// A list of summaries of model quality monitoring job definitions. public let jobDefinitionSummaries: [MonitoringJobDefinitionSummary]? - /// If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of model quality monitoring job definitions, use it in the next request. + /// If the response is truncated, Amazon SageMaker AI returns this token. To retrieve the next set of model quality monitoring job definitions, use it in the next request. public let nextToken: String? @inlinable @@ -29100,7 +29130,7 @@ extension SageMaker { } public struct ListNotebookInstanceLifecycleConfigsOutput: AWSDecodableShape { - /// If the response is truncated, SageMaker returns this token. To get the next set of lifecycle configurations, use it in the next request. + /// If the response is truncated, SageMaker AI returns this token. To get the next set of lifecycle configurations, use it in the next request. public let nextToken: String? /// An array of NotebookInstanceLifecycleConfiguration objects, each listing a lifecycle configuration. public let notebookInstanceLifecycleConfigs: [NotebookInstanceLifecycleConfigSummary]? @@ -29196,7 +29226,7 @@ extension SageMaker { } public struct ListNotebookInstancesOutput: AWSDecodableShape { - /// If the response to the previous ListNotebookInstances request was truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use the token in the next request. 
+ /// If the response to the previous ListNotebookInstances request was truncated, SageMaker AI returns this token. To retrieve the next set of notebook instances, use the token in the next request. public let nextToken: String? /// An array of NotebookInstanceSummary objects, one for each notebook instance. public let notebookInstances: [NotebookInstanceSummary]? @@ -32828,7 +32858,7 @@ extension SageMaker { public let instanceCount: Int? /// The ML compute instance type for the processing job. public let instanceType: ProcessingInstanceType? - /// The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job. + /// The Key Management Service (KMS) key that Amazon SageMaker AI uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job. public let volumeKmsKeyId: String? /// The size of the ML storage volume, in gigabytes, that you want to provision. You must specify sufficient ML storage for your scenario. public let volumeSizeInGB: Int? @@ -33012,7 +33042,7 @@ extension SageMaker { public let environment: [String: String]? /// Configures the monitoring job to run a specified Docker container image. public let monitoringAppSpecification: MonitoringAppSpecification? - /// The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker Endpoint. + /// The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker AI Endpoint. public let monitoringInputs: [MonitoringInput]? /// The array of outputs from the monitoring job to be uploaded to Amazon S3. public let monitoringOutputConfig: MonitoringOutputConfig? @@ -33020,7 +33050,7 @@ extension SageMaker { public let monitoringResources: MonitoringResources? /// Specifies networking options for an monitoring job. public let networkConfig: NetworkConfig? 
- /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform tasks on your behalf. public let roleArn: String? /// Specifies a time limit for how long the monitoring job is allowed to run. public let stoppingCondition: MonitoringStoppingCondition? @@ -33159,7 +33189,7 @@ extension SageMaker { } public struct MonitoringOutputConfig: AWSEncodableShape & AWSDecodableShape { - /// The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. + /// The Key Management Service (KMS) key that Amazon SageMaker AI uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. public let kmsKeyId: String? /// Monitoring outputs for monitoring jobs. This is where the output of the periodic monitoring jobs is uploaded. public let monitoringOutputs: [MonitoringOutput]? @@ -33209,11 +33239,11 @@ extension SageMaker { } public struct MonitoringS3Output: AWSEncodableShape & AWSDecodableShape { - /// The local path to the Amazon S3 storage location where Amazon SageMaker saves the results of a monitoring job. LocalPath is an absolute path for the output data. + /// The local path to the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job. LocalPath is an absolute path for the output data. public let localPath: String? /// Whether to upload the results of the monitoring job continuously or after the job completes. public let s3UploadMode: ProcessingS3UploadMode? - /// A URI that identifies the Amazon S3 storage location where Amazon SageMaker saves the results of a monitoring job. + /// A URI that identifies the Amazon S3 storage location where Amazon SageMaker AI saves the results of a monitoring job. public let s3Uri: String? 
@inlinable @@ -33551,11 +33581,11 @@ extension SageMaker { } public struct NotebookInstanceSummary: AWSDecodableShape { - /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let additionalCodeRepositories: [String]? /// A timestamp that shows when the notebook instance was created. public let creationTime: Date? - /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. 
When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let defaultCodeRepository: String? /// The type of ML compute instance that the notebook instance is running on. public let instanceType: InstanceType? @@ -34083,9 +34113,9 @@ extension SageMaker { public struct OutputConfig: AWSEncodableShape & AWSDecodableShape { /// Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions. DTYPE: Specifies the data type for the input. When compiling for ml_* (except for ml_inf) instances using PyTorch framework, provide the data type (dtype) of the model's input. "float32" is used if "DTYPE" is not specified. Options for data type are: float32: Use either "float" or "float32". int64: Use either "int64" or "long". For example, {"dtype" : "float32"}. CPU: Compilation for CPU supports the following compiler options. mcpu: CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'} mattr: CPU flags. For example, {'mattr': ['+neon', '+vfpv4']} ARM: Details of ARM CPU compilations. NEON: NEON is an implementation of the Advanced SIMD extension used in ARMv7 processors. For example, add {'mattr': ['+neon']} to the compiler options if compiling for ARM 32-bit platform with the NEON support. NVIDIA: Compilation for NVIDIA GPU supports the following compiler options. gpu_code: Specifies the targeted architecture. trt-ver: Specifies the TensorRT versions in x.y.z. format. cuda-ver: Specifies the CUDA version in x.y format. For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'} ANDROID: Compilation for the Android OS supports the following compiler options: ANDROID_PLATFORM: Specifies the Android API levels. 
Available levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}. mattr: Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit platform with NEON support. INFERENTIA: Compilation for target ml_inf1 uses compiler options passed in as a JSON string. For example, "CompilerOptions": "\"--verbose 1 --num-neuroncores 2 -O2\"". For information about supported compiler options, see Neuron Compiler CLI Reference Guide. CoreML: Compilation for the CoreML OutputConfig TargetDevice supports the following compiler options: class_labels: Specifies the classification labels file name inside input tar.gz file. For example, {"class_labels": "imagenet_labels_1000.txt"}. Labels inside the txt file should be separated by newlines. public let compilerOptions: String? - /// The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide. The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: alias/ExampleAlias Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias + /// The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker AI uses to encrypt your output models with Amazon S3 server-side encryption after compilation job. If you don't provide a KMS key ID, Amazon SageMaker AI uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide. 
The KmsKeyId can be any of the following formats: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: alias/ExampleAlias Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias public let kmsKeyId: String? - /// Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix. + /// Identifies the S3 bucket where you want Amazon SageMaker AI to store the model artifacts. For example, s3://bucket-name/key-name-prefix. public let s3OutputLocation: String? /// Identifies the target device or the machine learning instance that you want to run your model on after the compilation has completed. Alternatively, you can specify OS, architecture, and accelerator using TargetPlatform fields. It can be used instead of TargetPlatform. Currently ml_trn1 is available only in US East (N. Virginia) Region, and ml_inf2 is available only in US East (Ohio) Region. public let targetDevice: TargetDevice? @@ -36206,7 +36236,7 @@ extension SageMaker { } public struct RSessionAppSettings: AWSEncodableShape & AWSDecodableShape { - /// A list of custom SageMaker images that are configured to run as a RSession app. + /// A list of custom SageMaker AI images that are configured to run as a RSession app. public let customImages: [CustomImage]? public let defaultResourceSpec: ResourceSpec? @@ -37233,7 +37263,7 @@ extension SageMaker { public let instanceType: AppInstanceType? /// The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. public let lifecycleConfigArn: String? - /// The ARN of the SageMaker image that the image version belongs to. + /// The ARN of the SageMaker AI image that the image version belongs to. public let sageMakerImageArn: String? /// The SageMakerImageVersionAlias of the image to launch with. This value is in SemVer 2.0.0 versioning format. 
public let sageMakerImageVersionAlias: String? @@ -37557,7 +37587,7 @@ extension SageMaker { public let dataAnalysisEndTime: String? /// Sets the start time for a monitoring job window. Express this time as an offset to the times that you schedule your monitoring jobs to run. You schedule monitoring jobs with the ScheduleExpression parameter. Specify this offset in ISO 8601 duration format. For example, if you want to monitor the five hours of data in your dataset that precede the start of each monitoring job, you would specify: "-PT5H". The start time that you specify must not precede the end time that you specify by more than 24 hours. You specify the end time with the DataAnalysisEndTime parameter. If you set ScheduleExpression to NOW, this parameter is required. public let dataAnalysisStartTime: String? - /// A cron expression that describes details about the monitoring schedule. The supported cron expressions are: If you want to set the job to start every hour, use the following: Hourly: cron(0 * ? * * *) If you want to start the job daily: cron(0 [00-23] ? * * *) If you want to run the job one time, immediately, use the following keyword: NOW For example, the following are valid cron expressions: Daily at noon UTC: cron(0 12 ? * * *) Daily at midnight UTC: cron(0 0 ? * * *) To support running every 6, 12 hours, the following are also supported: cron(0 [00-23]/[01-24] ? * * *) For example, the following are valid cron expressions: Every 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *) Every two hours starting at midnight: cron(0 0/2 ? * * *) Even though the cron expression is set to start at 5PM UTC, note that there could be a delay of 0-20 minutes from the actual requested time to run the execution. We recommend that if you would like a daily schedule, you do not provide this parameter. Amazon SageMaker will pick a time for running every day. You can also specify the keyword NOW to run the monitoring job immediately, one time, without recurring. 
+ /// A cron expression that describes details about the monitoring schedule. The supported cron expressions are: If you want to set the job to start every hour, use the following: Hourly: cron(0 * ? * * *) If you want to start the job daily: cron(0 [00-23] ? * * *) If you want to run the job one time, immediately, use the following keyword: NOW For example, the following are valid cron expressions: Daily at noon UTC: cron(0 12 ? * * *) Daily at midnight UTC: cron(0 0 ? * * *) To support running every 6, 12 hours, the following are also supported: cron(0 [00-23]/[01-24] ? * * *) For example, the following are valid cron expressions: Every 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *) Every two hours starting at midnight: cron(0 0/2 ? * * *) Even though the cron expression is set to start at 5PM UTC, note that there could be a delay of 0-20 minutes from the actual requested time to run the execution. We recommend that if you would like a daily schedule, you do not provide this parameter. Amazon SageMaker AI will pick a time for running every day. You can also specify the keyword NOW to run the monitoring job immediately, one time, without recurring. public let scheduleExpression: String? @inlinable @@ -38428,11 +38458,11 @@ extension SageMaker { } public struct SpaceSettings: AWSEncodableShape & AWSDecodableShape { - /// The type of app created within the space. + /// The type of app created within the space. If using the UpdateSpace API, you can't change the app type of your space by specifying a different value for this field. public let appType: AppType? /// The Code Editor application settings. public let codeEditorAppSettings: SpaceCodeEditorAppSettings? - /// A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio. + /// A file system, created by you, that you assign to a space for an Amazon SageMaker AI Domain. 
Permitted users can access this file system in Amazon SageMaker AI Studio. public let customFileSystems: [CustomFileSystem]? /// The settings for the JupyterLab application. public let jupyterLabAppSettings: SpaceJupyterLabAppSettings? @@ -39205,15 +39235,15 @@ extension SageMaker { } public struct StudioLifecycleConfigDetails: AWSDecodableShape { - /// The creation time of the Amazon SageMaker Studio Lifecycle Configuration. + /// The creation time of the Amazon SageMaker AI Studio Lifecycle Configuration. public let creationTime: Date? - /// This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle Configurations are immutable. + /// This value is equivalent to CreationTime because Amazon SageMaker AI Studio Lifecycle Configurations are immutable. public let lastModifiedTime: Date? /// The App type to which the Lifecycle Configuration is attached. public let studioLifecycleConfigAppType: StudioLifecycleConfigAppType? /// The Amazon Resource Name (ARN) of the Lifecycle Configuration. public let studioLifecycleConfigArn: String? - /// The name of the Amazon SageMaker Studio Lifecycle Configuration. + /// The name of the Amazon SageMaker AI Studio Lifecycle Configuration. public let studioLifecycleConfigName: String? @inlinable @@ -39448,7 +39478,7 @@ extension SageMaker { } public struct TensorBoardAppSettings: AWSEncodableShape & AWSDecodableShape { - /// The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. + /// The default instance type and the Amazon Resource Name (ARN) of the SageMaker AI image created on the instance. public let defaultResourceSpec: ResourceSpec? @inlinable @@ -41907,7 +41937,7 @@ extension SageMaker { } public struct UpdateDomainRequest: AWSEncodableShape { - /// Specifies the VPC used for non-EFS traffic. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access. 
VpcOnly - All Studio traffic is through the specified VPC and subnets. This configuration can only be modified if there are no apps in the InService, Pending, or Deleting state. The configuration cannot be updated if DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided as part of the same request. + /// Specifies the VPC used for non-EFS traffic. PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access. VpcOnly - All Studio traffic is through the specified VPC and subnets. This configuration can only be modified if there are no apps in the InService, Pending, or Deleting state. The configuration cannot be updated if DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided as part of the same request. public let appNetworkAccessType: AppNetworkAccessType? /// The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. If setting up the domain for use with RStudio, this value must be set to Service. public let appSecurityGroupManagement: AppSecurityGroupManagement? @@ -42286,7 +42316,7 @@ extension SageMaker { public let displayName: String? /// The name of the image to update. public let imageName: String? - /// The new ARN for the IAM role that enables Amazon SageMaker to perform tasks on your behalf. + /// The new ARN for the IAM role that enables Amazon SageMaker AI to perform tasks on your behalf. public let roleArn: String? @inlinable @@ -42353,7 +42383,7 @@ extension SageMaker { public let horovod: Bool? /// The name of the image. public let imageName: String? 
- /// Indicates SageMaker job type compatibility. TRAINING: The image version is compatible with SageMaker training jobs. INFERENCE: The image version is compatible with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels. + /// Indicates SageMaker AI job type compatibility. TRAINING: The image version is compatible with SageMaker AI training jobs. INFERENCE: The image version is compatible with SageMaker AI inference jobs. NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels. public let jobType: JobType? /// The machine learning framework vended in the image version. public let mlFramework: String? @@ -42888,9 +42918,9 @@ extension SageMaker { public struct UpdateNotebookInstanceInput: AWSEncodableShape { /// This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types to associate with this notebook instance. public let acceleratorTypes: [NotebookInstanceAcceleratorType]? - /// An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. 
public let additionalCodeRepositories: [String]? - /// The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances. + /// The Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances. public let defaultCodeRepository: String? /// This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types to remove from this notebook instance. public let disassociateAcceleratorTypes: Bool? @@ -42908,11 +42938,11 @@ extension SageMaker { public let lifecycleConfigName: String? /// The name of the notebook instance to update. public let notebookInstanceName: String? - /// The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access the notebook instance. For more information, see SageMaker Roles. To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. + /// The Amazon Resource Name (ARN) of the IAM role that SageMaker AI can assume to access the notebook instance. For more information, see SageMaker AI Roles. To be able to pass this role to SageMaker AI, the caller of this API must have the iam:PassRole permission. public let roleArn: String? 
/// Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled. If you set this to Disabled, users don't have root access on the notebook instance, but lifecycle configuration scripts still run with root permissions. public let rootAccess: RootAccess? - /// The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. ML storage volumes are encrypted, so SageMaker can't determine the amount of available free space on the volume. Because of this, you can increase the volume size when you update a notebook instance, but you can't decrease the volume size. If you want to decrease the size of the ML storage volume in use, create a new notebook instance with the desired size. + /// The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. ML storage volumes are encrypted, so SageMaker AI can't determine the amount of available free space on the volume. Because of this, you can increase the volume size when you update a notebook instance, but you can't decrease the volume size. If you want to decrease the size of the ML storage volume in use, create a new notebook instance with the desired size. public let volumeSizeInGB: Int? @inlinable @@ -43730,7 +43760,7 @@ extension SageMaker { public let canvasAppSettings: CanvasAppSettings? /// The Code Editor application settings. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. public let codeEditorAppSettings: CodeEditorAppSettings? - /// The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. 
+ /// The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker AI Studio. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. public let customFileSystemConfigs: [CustomFileSystemConfig]? /// Details about the POSIX identity that is used for file system operations. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. public let customPosixUserConfig: CustomPosixUserConfig? @@ -43748,9 +43778,9 @@ extension SageMaker { public let rSessionAppSettings: RSessionAppSettings? /// A collection of settings that configure user interaction with the RStudioServerPro app. public let rStudioServerProAppSettings: RStudioServerProAppSettings? - /// The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication. Optional when the CreateDomain.AppNetworkAccessType parameter is set to PublicInternetOnly. Required when the CreateDomain.AppNetworkAccessType parameter is set to VpcOnly, unless specified as part of the DefaultUserSettings for the domain. Amazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. + /// The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication. Optional when the CreateDomain.AppNetworkAccessType parameter is set to PublicInternetOnly. Required when the CreateDomain.AppNetworkAccessType parameter is set to VpcOnly, unless specified as part of the DefaultUserSettings for the domain. 
Amazon SageMaker AI adds a security group to allow NFS traffic from Amazon SageMaker AI Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. public let securityGroups: [String]? - /// Specifies options for sharing Amazon SageMaker Studio notebooks. + /// Specifies options for sharing Amazon SageMaker AI Studio notebooks. public let sharingSettings: SharingSettings? /// The storage settings for a space. SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces. public let spaceStorageSettings: DefaultSpaceStorageSettings? diff --git a/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift b/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift index 3ded5a417b..1451cedac0 100644 --- a/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift +++ b/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift @@ -1582,11 +1582,11 @@ public struct SecurityHub: AWSService { /// Returns history for a Security Hub finding in the last 90 days. The history includes changes made to any fields in the Amazon Web Services Security Finding Format (ASFF). /// /// Parameters: - /// - endTime: An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. 
If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps + /// - endTime: An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. /// - findingIdentifier: /// - maxResults: The maximum number of results to be returned. If you don’t provide it, Security Hub returns up to 100 results of finding history. /// - nextToken: A token for pagination purposes. Provide NULL as the initial value. In subsequent requests, provide the token included in the response to get up to an additional 100 results of finding history. If you don’t provide NextToken, Security Hub returns up to 100 results of finding history for each request. - /// - startTime: A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. 
If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps + /// - startTime: A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. /// - logger: Logger use during operation @inlinable public func getFindingHistory( @@ -2611,7 +2611,7 @@ public struct SecurityHub: AWSService { /// Updates configuration options for Security Hub. /// /// Parameters: - /// - autoEnableControls: Whether to automatically enable new controls when they are added to standards that are enabled. By default, this is set to true, and new controls are enabled automatically. 
To not automatically enable new controls, set this to false. + /// - autoEnableControls: Whether to automatically enable new controls when they are added to standards that are enabled. By default, this is set to true, and new controls are enabled automatically. To not automatically enable new controls, set this to false. When you automatically enable new controls, you can interact with the controls in the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of DISABLED. It can take up to several days for Security Hub to process the control release and designate the control as ENABLED in your account. During the processing period, you can manually enable or disable a control, and Security Hub will maintain that designation regardless of whether you have AutoEnableControls set to true. /// - controlFindingGenerator: Updates whether the calling account has consolidated control findings turned on. If the value for this field is set to SECURITY_CONTROL, Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards. If the value for this field is set to STANDARD_CONTROL, Security Hub generates separate findings for a control check when the check applies to multiple enabled standards. For accounts that are part of an organization, this value can only be updated in the administrator account. /// - logger: Logger use during operation @inlinable @@ -2879,10 +2879,10 @@ extension SecurityHub { /// Return PaginatorSequence for operation ``getFindingHistory(_:logger:)``. /// /// - Parameters: - /// - endTime: An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. 
If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps + /// - endTime: An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. /// - findingIdentifier: /// - maxResults: The maximum number of results to be returned. If you don’t provide it, Security Hub returns up to 100 results of finding history. - /// - startTime: A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. 
If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps + /// - startTime: A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. 
/// - logger: Logger used for logging @inlinable public func getFindingHistoryPaginator( diff --git a/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift b/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift index fc10d2ed21..e2ce661377 100644 --- a/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift +++ b/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift @@ -1089,9 +1089,7 @@ extension SecurityHub { public struct AutomationRulesConfig: AWSDecodableShape { /// One or more actions to update finding fields if a finding matches the defined criteria of the rule. public let actions: [AutomationRulesAction]? - /// A timestamp that indicates when the rule was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the rule was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. @OptionalCustomCoding public var createdAt: Date? /// The principal that created a rule. @@ -1110,9 +1108,7 @@ extension SecurityHub { public let ruleOrder: Int? /// Whether the rule is active after it is created. If this parameter is equal to ENABLED, Security Hub starts applying the rule to findings and finding updates after the rule is created. public let ruleStatus: RuleStatus? - /// A timestamp that indicates when the rule was most recently updated. 
This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the rule was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. @OptionalCustomCoding public var updatedAt: Date? @@ -1223,31 +1219,23 @@ extension SecurityHub { public let complianceStatus: [StringFilter]? /// The likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0–100 basis using a ratio scale. A value of 0 means 0 percent confidence, and a value of 100 means 100 percent confidence. For example, a data exfiltration detection based on a statistical deviation of network traffic has low confidence because an actual exfiltration hasn't been verified. For more information, see Confidence in the Security Hub User Guide. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let confidence: [NumberFilter]? - /// A timestamp that indicates when this finding record was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) Array Members: Minimum number of 1 item. Maximum number of 20 items. + /// A timestamp that indicates when this finding record was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let createdAt: [DateFilter]? /// The level of importance that is assigned to the resources that are associated with a finding. Criticality is scored on a 0–100 basis, using a ratio scale that supports only full integers. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. For more information, see Criticality in the Security Hub User Guide. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let criticality: [NumberFilter]? /// A finding's description. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let description: [StringFilter]? - /// A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) Array Members: Minimum number of 1 item. Maximum number of 20 items. + /// A timestamp that indicates when the potential security issue captured by a finding was first observed by the security findings product. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let firstObservedAt: [DateFilter]? /// The identifier for the solution-specific component that generated a finding. Array Members: Minimum number of 1 item. Maximum number of 100 items. public let generatorId: [StringFilter]? /// The product-specific identifier for a finding. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let id: [StringFilter]? - /// A timestamp that indicates when the potential security issue captured by a finding was most recently observed by the security findings product. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) Array Members: Minimum number of 1 item. 
Maximum number of 20 items. + /// A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let lastObservedAt: [DateFilter]? /// The text of a user-defined note that's added to a finding. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let noteText: [StringFilter]? - /// The timestamp of when the note was updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) Array Members: Minimum number of 1 item. Maximum number of 20 items. + /// The timestamp of when the note was updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let noteUpdatedAt: [DateFilter]? /// The principal that created a note. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let noteUpdatedBy: [StringFilter]? @@ -1285,9 +1273,7 @@ extension SecurityHub { public let title: [StringFilter]? /// One or more finding types in the format of namespace/category/classifier that classify a finding. 
For a list of namespaces, classifiers, and categories, see Types taxonomy for ASFF in the Security Hub User Guide. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let type: [StringFilter]? - /// A timestamp that indicates when the finding record was most recently updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) Array Members: Minimum number of 1 item. Maximum number of 20 items. + /// A timestamp that indicates when the finding record was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let updatedAt: [DateFilter]? /// A list of user-defined name and value string pairs added to a finding. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let userDefinedFields: [MapFilter]? @@ -1492,9 +1478,7 @@ extension SecurityHub { } public struct AutomationRulesMetadata: AWSDecodableShape { - /// A timestamp that indicates when the rule was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the rule was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. @OptionalCustomCoding public var createdAt: Date? /// The principal that created a rule. @@ -1511,9 +1495,7 @@ extension SecurityHub { public let ruleOrder: Int? /// Whether the rule is active after it is created. If this parameter is equal to ENABLED, Security Hub starts applying the rule to findings and finding updates after the rule is created. To change the value of this parameter after creating a rule, use BatchUpdateAutomationRules . public let ruleStatus: RuleStatus? - /// A timestamp that indicates when the rule was most recently updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the rule was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. @OptionalCustomCoding public var updatedAt: Date? 
@@ -1871,13 +1853,9 @@ extension SecurityHub { public let callerType: String? /// Provided if CallerType is domain. Provides information about the DNS domain that the API call originated from. public let domainDetails: AwsApiCallActionDomainDetails? - /// A timestamp that indicates when the API call was first observed. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the API call was first observed. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let firstSeen: String? - /// A timestamp that indicates when the API call was most recently observed. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the API call was most recently observed. 
For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastSeen: String? /// Provided if CallerType is remoteip. Provides information about the remote IP address that the API call originated from. public let remoteIpDetails: ActionRemoteIpDetails? @@ -2087,9 +2065,7 @@ extension SecurityHub { public let apiKeySource: String? /// The list of binary media types supported by the REST API. public let binaryMediaTypes: [String]? - /// Indicates when the API was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the API was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdDate: String? /// A description of the REST API. public let description: String? @@ -2156,9 +2132,7 @@ extension SecurityHub { public let canarySettings: AwsApiGatewayCanarySettings? /// The identifier of the client certificate for the stage. public let clientCertificateId: String? - /// Indicates when the stage was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the stage was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdDate: String? /// The identifier of the deployment that the stage points to. public let deploymentId: String? @@ -2166,9 +2140,7 @@ extension SecurityHub { public let description: String? /// The version of the API documentation that is associated with the stage. public let documentationVersion: String? - /// Indicates when the stage was most recently updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the stage was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastUpdatedDate: String? /// Defines the method settings for the stage. public let methodSettings: [AwsApiGatewayMethodSettings]? @@ -2252,9 +2224,7 @@ extension SecurityHub { public let apiKeySelectionExpression: String? 
/// A cross-origin resource sharing (CORS) configuration. Supported only for HTTP APIs. public let corsConfiguration: AwsCorsConfiguration? - /// Indicates when the API was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the API was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdDate: String? /// A description of the API. public let description: String? @@ -2351,9 +2321,7 @@ extension SecurityHub { public let autoDeploy: Bool? /// The identifier of a client certificate for a stage. Supported only for WebSocket API calls. public let clientCertificateId: String? - /// Indicates when the stage was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the stage was created. 
For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdDate: String? /// Default route settings for the stage. public let defaultRouteSettings: AwsApiGatewayV2RouteSettings? @@ -2363,9 +2331,7 @@ extension SecurityHub { public let description: String? /// The status of the last deployment of a stage. Supported only if the stage has automatic deployment enabled. public let lastDeploymentStatusMessage: String? - /// Indicates when the stage was most recently updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the stage was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastUpdatedDate: String? /// The route settings for the stage. public let routeSettings: AwsApiGatewayV2RouteSettings? @@ -2765,9 +2731,7 @@ extension SecurityHub { public let availabilityZones: [AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails]? /// Indicates whether capacity rebalancing is enabled. public let capacityRebalance: Bool? - /// Indicates when the auto scaling group was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the auto scaling group was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdTime: String? /// The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before it checks the health status of an EC2 instance that has come into service. public let healthCheckGracePeriod: Int? @@ -3069,9 +3033,7 @@ extension SecurityHub { public let classicLinkVpcId: String? /// The identifiers of one or more security groups for the VPC that is specified in ClassicLinkVPCId. public let classicLinkVpcSecurityGroups: [String]? - /// The creation date and time for the launch configuration. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The creation date and time for the launch configuration. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdTime: String? /// Whether the launch configuration is optimized for Amazon EBS I/O. 
public let ebsOptimized: Bool? @@ -3667,9 +3629,7 @@ extension SecurityHub { public struct AwsCertificateManagerCertificateDetails: AWSEncodableShape & AWSDecodableShape { /// The ARN of the private certificate authority (CA) that will be used to issue the certificate. public let certificateAuthorityArn: String? - /// Indicates when the certificate was requested. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the certificate was requested. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdAt: String? /// The fully qualified domain name (FQDN), such as www.example.com, that is secured by the certificate. public let domainName: String? @@ -3679,15 +3639,11 @@ extension SecurityHub { public let extendedKeyUsages: [AwsCertificateManagerCertificateExtendedKeyUsage]? /// For a failed certificate request, the reason for the failure. Valid values: NO_AVAILABLE_CONTACTS | ADDITIONAL_VERIFICATION_REQUIRED | DOMAIN_NOT_ALLOWED | INVALID_PUBLIC_DOMAIN | DOMAIN_VALIDATION_DENIED | CAA_ERROR | PCA_LIMIT_EXCEEDED | PCA_INVALID_ARN | PCA_INVALID_STATE | PCA_REQUEST_FAILED | PCA_NAME_CONSTRAINTS_VALIDATION | PCA_RESOURCE_NOT_FOUND | PCA_INVALID_ARGS | PCA_INVALID_DURATION | PCA_ACCESS_DENIED | SLR_NOT_FOUND | OTHER public let failureReason: String? - /// Indicates when the certificate was imported. 
Provided if the certificate type is IMPORTED. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the certificate was imported. Provided if the certificate type is IMPORTED. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let importedAt: String? /// The list of ARNs for the Amazon Web Services resources that use the certificate. public let inUseBy: [String]? - /// Indicates when the certificate was issued. Provided if the certificate type is AMAZON_ISSUED. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the certificate was issued. Provided if the certificate type is AMAZON_ISSUED. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. 
public let issuedAt: String? /// The name of the certificate authority that issued and signed the certificate. public let issuer: String? @@ -3695,13 +3651,9 @@ extension SecurityHub { public let keyAlgorithm: String? /// A list of key usage X.509 v3 extension objects. public let keyUsages: [AwsCertificateManagerCertificateKeyUsage]? - /// The time after which the certificate becomes invalid. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The time after which the certificate becomes invalid. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let notAfter: String? - /// The time before which the certificate is not valid. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The time before which the certificate is not valid. 
For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let notBefore: String? /// Provides a value that specifies whether to add the certificate to a transparency log. public let options: AwsCertificateManagerCertificateOptions? @@ -3923,9 +3875,7 @@ extension SecurityHub { public let renewalStatus: String? /// The reason that a renewal request was unsuccessful. This attribute is used only when RenewalStatus is FAILED. Valid values: NO_AVAILABLE_CONTACTS | ADDITIONAL_VERIFICATION_REQUIRED | DOMAIN_NOT_ALLOWED | INVALID_PUBLIC_DOMAIN | DOMAIN_VALIDATION_DENIED | CAA_ERROR | PCA_LIMIT_EXCEEDED | PCA_INVALID_ARN | PCA_INVALID_STATE | PCA_REQUEST_FAILED | PCA_NAME_CONSTRAINTS_VALIDATION | PCA_RESOURCE_NOT_FOUND | PCA_INVALID_ARGS | PCA_INVALID_DURATION | PCA_ACCESS_DENIED | SLR_NOT_FOUND | OTHER public let renewalStatusReason: String? - /// Indicates when the renewal summary was last updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the renewal summary was last updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let updatedAt: String? @inlinable @@ -4188,9 +4138,7 @@ extension SecurityHub { public let domainName: String? /// The entity tag is a hash of the object. public let eTag: String? 
- /// Indicates when that the distribution was last modified. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the distribution was last modified. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastModifiedTime: String? /// A complex type that controls whether access logs are written for the distribution. public let logging: AwsCloudFrontDistributionLogging? @@ -5464,9 +5412,7 @@ extension SecurityHub { public struct AwsDynamoDbTableBillingModeSummary: AWSEncodableShape & AWSDecodableShape { /// The method used to charge for read and write throughput and to manage capacity. public let billingMode: String? - /// If the billing mode is PAY_PER_REQUEST, indicates when the billing mode was set to that value. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// If the billing mode is PAY_PER_REQUEST, indicates when the billing mode was set to that value. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastUpdateToPayPerRequestDateTime: String? @inlinable @@ -5491,9 +5437,7 @@ extension SecurityHub { public let attributeDefinitions: [AwsDynamoDbTableAttributeDefinition]? /// Information about the billing for read/write capacity on the table. public let billingModeSummary: AwsDynamoDbTableBillingModeSummary? - /// Indicates when the table was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the table was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let creationDateTime: String? /// Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table. public let deletionProtectionEnabled: Bool? 
@@ -5749,13 +5693,9 @@ extension SecurityHub { } public struct AwsDynamoDbTableProvisionedThroughput: AWSEncodableShape & AWSDecodableShape { - /// Indicates when the provisioned throughput was last decreased. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the provisioned throughput was last decreased. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastDecreaseDateTime: String? - /// Indicates when the provisioned throughput was last increased. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the provisioned throughput was last increased. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastIncreaseDateTime: String? 
/// The number of times during the current UTC calendar day that the provisioned throughput was decreased. public let numberOfDecreasesToday: Int? @@ -5868,9 +5808,7 @@ extension SecurityHub { } public struct AwsDynamoDbTableRestoreSummary: AWSEncodableShape & AWSDecodableShape { - /// Indicates the point in time that the table was restored to. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates the point in time that the table was restored to. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let restoreDateTime: String? /// Whether a restore is currently in progress. public let restoreInProgress: Bool? @@ -5902,9 +5840,7 @@ extension SecurityHub { } public struct AwsDynamoDbTableSseDescription: AWSEncodableShape & AWSDecodableShape { - /// If the key is inaccessible, the date and time when DynamoDB detected that the key was inaccessible. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// If the key is inaccessible, the date and time when DynamoDB detected that the key was inaccessible. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let inaccessibleEncryptionDateTime: String? /// The ARN of the KMS key that is used for the KMS encryption. public let kmsMasterKeyArn: String? @@ -6319,9 +6255,7 @@ extension SecurityHub { public let ipV6Addresses: [String]? /// The key name associated with the instance. public let keyName: String? - /// Indicates when the instance was launched. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the instance was launched. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let launchedAt: String? /// Details about the metadata options for the Amazon EC2 instance. public let metadataOptions: AwsEc2InstanceMetadataOptions? 
@@ -7729,9 +7663,7 @@ extension SecurityHub { public struct AwsEc2NetworkInterfaceAttachment: AWSEncodableShape & AWSDecodableShape { /// The identifier of the network interface attachment public let attachmentId: String? - /// Indicates when the attachment initiated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the attachment initiated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let attachTime: String? /// Indicates whether the network interface is deleted when the instance is terminated. public let deleteOnTermination: Bool? @@ -8328,9 +8260,7 @@ extension SecurityHub { public struct AwsEc2VolumeDetails: AWSEncodableShape & AWSDecodableShape { /// The volume attachments. public let attachments: [AwsEc2VolumeAttachment]? - /// Indicates when the volume was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the volume was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createTime: String? /// The device name for the volume that is attached to the instance. public let deviceName: String? @@ -8857,9 +8787,7 @@ extension SecurityHub { public let acceptedRouteCount: Int? /// The ARN of the VPN tunnel endpoint certificate. public let certificateArn: String? - /// The date and time of the last change in status. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The date and time of the last change in status. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastStatusChange: String? /// The Internet-routable IP address of the virtual private gateway's outside interface. public let outsideIpAddress: String? @@ -8901,9 +8829,7 @@ extension SecurityHub { public let architecture: String? /// The sha256 digest of the image manifest. 
public let imageDigest: String? - /// The date and time when the image was pushed to the repository. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The date and time when the image was pushed to the repository. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let imagePublishedAt: String? /// The list of tags that are associated with the image. public let imageTags: [String]? @@ -11837,9 +11763,7 @@ extension SecurityHub { public let canonicalHostedZoneName: String? /// The ID of the Amazon Route 53 hosted zone for the load balancer. public let canonicalHostedZoneNameID: String? - /// Indicates when the load balancer was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the load balancer was created. 
For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdTime: String? /// The DNS name of the load balancer. public let dnsName: String? @@ -12138,9 +12062,7 @@ extension SecurityHub { public let availabilityZones: [AvailabilityZone]? /// The ID of the Amazon Route 53 hosted zone associated with the load balancer. public let canonicalHostedZoneId: String? - /// Indicates when the load balancer was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the load balancer was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdTime: String? /// The public DNS name of the load balancer. public let dnsName: String? @@ -12731,9 +12653,7 @@ extension SecurityHub { public let accessKeyId: String? /// The Amazon Web Services account ID of the account for the key. public let accountId: String? - /// Indicates when the IAM access key was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the IAM access key was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdAt: String? /// The ID of the principal associated with an access key. public let principalId: String? @@ -12823,9 +12743,7 @@ extension SecurityHub { } public struct AwsIamAccessKeySessionContextAttributes: AWSEncodableShape & AWSDecodableShape { - /// Indicates when the session was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the session was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let creationDate: String? /// Indicates whether the session used multi-factor authentication (MFA). public let mfaAuthenticated: Bool? @@ -12910,9 +12828,7 @@ extension SecurityHub { public struct AwsIamGroupDetails: AWSEncodableShape & AWSDecodableShape { /// A list of the managed policies that are attached to the IAM group. 
public let attachedManagedPolicies: [AwsIamAttachedManagedPolicy]? - /// Indicates when the IAM group was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the IAM group was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createDate: String? /// The identifier of the IAM group. public let groupId: String? @@ -12977,9 +12893,7 @@ extension SecurityHub { public struct AwsIamInstanceProfile: AWSEncodableShape & AWSDecodableShape { /// The ARN of the instance profile. public let arn: String? - /// Indicates when the instance profile was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the instance profile was created. 
For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createDate: String? /// The identifier of the instance profile. public let instanceProfileId: String? @@ -13026,9 +12940,7 @@ extension SecurityHub { public let arn: String? /// The policy that grants an entity permission to assume the role. public let assumeRolePolicyDocument: String? - /// Indicates when the role was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the role was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createDate: String? /// The path to the role. public let path: String? @@ -13094,9 +13006,7 @@ extension SecurityHub { public struct AwsIamPolicyDetails: AWSEncodableShape & AWSDecodableShape { /// The number of users, groups, and roles that the policy is attached to. public let attachmentCount: Int? - /// When the policy was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// When the policy was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createDate: String? /// The identifier of the default version of the policy. public let defaultVersionId: String? @@ -13114,9 +13024,7 @@ extension SecurityHub { public let policyName: String? /// List of versions of the policy. public let policyVersionList: [AwsIamPolicyVersion]? - /// When the policy was most recently updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// When the policy was most recently updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let updateDate: String? @inlinable @@ -13163,9 +13071,7 @@ extension SecurityHub { } public struct AwsIamPolicyVersion: AWSEncodableShape & AWSDecodableShape { - /// Indicates when the version was created. This field accepts only the specified formats. 
Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the version was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createDate: String? /// Whether the version is the default version. public let isDefaultVersion: Bool? @@ -13196,9 +13102,7 @@ extension SecurityHub { public let assumeRolePolicyDocument: String? /// The list of the managed policies that are attached to the role. public let attachedManagedPolicies: [AwsIamAttachedManagedPolicy]? - /// Indicates when the role was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the role was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createDate: String? 
/// The list of instance profiles that contain this role. public let instanceProfileList: [AwsIamInstanceProfile]? @@ -13283,9 +13187,7 @@ extension SecurityHub { public struct AwsIamUserDetails: AWSEncodableShape & AWSDecodableShape { /// A list of the managed policies that are attached to the user. public let attachedManagedPolicies: [AwsIamAttachedManagedPolicy]? - /// Indicates when the user was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the user was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createDate: String? /// A list of IAM groups that the user belongs to. public let groupList: [String]? @@ -13423,9 +13325,7 @@ extension SecurityHub { public struct AwsKmsKeyDetails: AWSEncodableShape & AWSDecodableShape { /// The twelve-digit account ID of the Amazon Web Services account that owns the KMS key. public let awsAccountId: String? - /// Indicates when the KMS key was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the KMS key was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let creationDate: Double? /// A description of the KMS key. public let description: String? @@ -13542,9 +13442,7 @@ extension SecurityHub { public let handler: String? /// The KMS key that is used to encrypt the function's environment variables. This key is only returned if you've configured a customer managed customer managed key. public let kmsKeyArn: String? - /// Indicates when the function was last updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the function was last updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastModified: String? /// The function's layers. public let layers: [AwsLambdaFunctionLayer]? 
@@ -13766,9 +13664,7 @@ extension SecurityHub { public struct AwsLambdaLayerVersionDetails: AWSEncodableShape & AWSDecodableShape { /// The layer's compatible function runtimes. The following list includes deprecated runtimes. For more information, see Runtime deprecation policy in the Lambda Developer Guide. Array Members: Maximum number of 5 items. Valid Values: nodejs | nodejs4.3 | nodejs6.10 | nodejs8.10 | nodejs10.x | nodejs12.x | nodejs14.x | nodejs16.x | java8 | java8.al2 | java11 | python2.7 | python3.6 | python3.7 | python3.8 | python3.9 | dotnetcore1.0 | dotnetcore2.0 | dotnetcore2.1 | dotnetcore3.1 | dotnet6 | nodejs4.3-edge | go1.x | ruby2.5 | ruby2.7 | provided | provided.al2 | nodejs18.x | python3.10 | java17 | ruby3.2 | python3.11 | nodejs20.x | provided.al2023 | python3.12 | java21 public let compatibleRuntimes: [String]? - /// Indicates when the version was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the version was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdDate: String? /// The version number. public let version: Int64? @@ -14669,9 +14565,7 @@ extension SecurityHub { public let availabilityZones: [String]? /// The number of days for which automated backups are retained. public let backupRetentionPeriod: Int? 
- /// Indicates when the DB cluster was created, in Universal Coordinated Time (UTC). This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the DB cluster was created, in Universal Coordinated Time (UTC). For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let clusterCreateTime: String? /// Whether tags are copied from the DB cluster to snapshots of the DB cluster. public let copyTagsToSnapshot: Bool? @@ -14952,9 +14846,7 @@ extension SecurityHub { public let allocatedStorage: Int? /// A list of Availability Zones where instances in the DB cluster can be created. public let availabilityZones: [String]? - /// Indicates when the DB cluster was created, in Universal Coordinated Time (UTC). This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the DB cluster was created, in Universal Coordinated Time (UTC). For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let clusterCreateTime: String? /// The DB cluster identifier. public let dbClusterIdentifier: String? @@ -14978,9 +14870,7 @@ extension SecurityHub { public let percentProgress: Int? /// The port number on which the DB instances in the DB cluster accept connections. public let port: Int? - /// Indicates when the snapshot was taken. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the snapshot was taken. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let snapshotCreateTime: String? /// The type of DB cluster snapshot. public let snapshotType: String? @@ -15172,17 +15062,13 @@ extension SecurityHub { public let enhancedMonitoringResourceArn: String? 
/// True if mapping of IAM accounts to database accounts is enabled, and otherwise false. IAM database authentication can be enabled for the following database engines. For MySQL 5.6, minor version 5.6.34 or higher For MySQL 5.7, minor version 5.7.16 or higher Aurora 5.6 or higher public let iamDatabaseAuthenticationEnabled: Bool? - /// Indicates when the DB instance was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the DB instance was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let instanceCreateTime: String? /// Specifies the provisioned IOPS (I/O operations per second) for this DB instance. public let iops: Int? /// If StorageEncrypted is true, the KMS key identifier for the encrypted DB instance. public let kmsKeyId: String? - /// Specifies the latest time to which a database can be restored with point-in-time restore. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Specifies the latest time to which a database can be restored with point-in-time restore. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let latestRestorableTime: String? /// License model information for this DB instance. public let licenseModel: String? @@ -16024,9 +15910,7 @@ extension SecurityHub { public let sourceType: String? /// The status of the event notification subscription. Valid values: creating | modifying | deleting | active | no-permission | topic-not-exist public let status: String? - /// The datetime when the event notification subscription was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The datetime when the event notification subscription was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let subscriptionCreationTime: String? 
@inlinable @@ -16241,15 +16125,11 @@ extension SecurityHub { } public struct AwsRedshiftClusterDeferredMaintenanceWindow: AWSEncodableShape & AWSDecodableShape { - /// The end of the time window for which maintenance was deferred. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The end of the time window for which maintenance was deferred. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let deferMaintenanceEndTime: String? /// The identifier of the maintenance window. public let deferMaintenanceIdentifier: String? - /// The start of the time window for which maintenance was deferred. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The start of the time window for which maintenance was deferred. 
For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let deferMaintenanceStartTime: String? @inlinable @@ -16281,9 +16161,7 @@ extension SecurityHub { public let availabilityZone: String? /// The availability status of the cluster for queries. Possible values are the following: Available - The cluster is available for queries. Unavailable - The cluster is not available for queries. Maintenance - The cluster is intermittently available for queries due to maintenance activities. Modifying -The cluster is intermittently available for queries due to changes that modify the cluster. Failed - The cluster failed and is not available for queries. public let clusterAvailabilityStatus: String? - /// Indicates when the cluster was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the cluster was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let clusterCreateTime: String? /// The unique identifier of the cluster. public let clusterIdentifier: String? @@ -16319,9 +16197,7 @@ extension SecurityHub { public let endpoint: AwsRedshiftClusterEndpoint? /// Indicates whether to create the cluster with enhanced VPC routing enabled. public let enhancedVpcRouting: Bool? - /// Indicates when the next snapshot is expected to be taken. 
The cluster must have a valid snapshot schedule and have backups enabled. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the next snapshot is expected to be taken. The cluster must have a valid snapshot schedule and have backups enabled. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let expectedNextSnapshotScheduleTime: String? /// The status of the next expected snapshot. Valid values: OnTrack | Pending public let expectedNextSnapshotScheduleTimeStatus: String? @@ -16339,9 +16215,7 @@ extension SecurityHub { public let manualSnapshotRetentionPeriod: Int? /// The master user name for the cluster. This name is used to connect to the database that is specified in as the value of DBName. public let masterUsername: String? - /// Indicates the start of the next maintenance window. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates the start of the next maintenance window. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let nextMaintenanceWindowStartTime: String? /// The node type for the nodes in the cluster. public let nodeType: String? @@ -16619,13 +16493,9 @@ extension SecurityHub { public let bucketName: String? /// The message indicating that the logs failed to be delivered. public let lastFailureMessage: String? - /// The last time when logs failed to be delivered. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The last time when logs failed to be delivered. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastFailureTime: String? - /// The last time that logs were delivered successfully. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. 
The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The last time that logs were delivered successfully. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastSuccessfulDeliveryTime: String? /// Indicates whether logging is enabled. public let loggingEnabled: Bool? @@ -17065,9 +16935,7 @@ extension SecurityHub { public struct AwsS3BucketBucketLifecycleConfigurationRulesDetails: AWSEncodableShape & AWSDecodableShape { /// How Amazon S3 responds when a multipart upload is incomplete. Specifically, provides a number of days before Amazon S3 cancels the entire upload. public let abortIncompleteMultipartUpload: AwsS3BucketBucketLifecycleConfigurationRulesAbortIncompleteMultipartUploadDetails? - /// The date when objects are moved or deleted. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// The date when objects are moved or deleted. 
For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let expirationDate: String? /// The length in days of the lifetime for objects that are subject to the rule. public let expirationInDays: Int? @@ -17284,9 +17152,7 @@ extension SecurityHub { } public struct AwsS3BucketBucketLifecycleConfigurationRulesTransitionsDetails: AWSEncodableShape & AWSDecodableShape { - /// A date on which to transition objects to the specified storage class. If you provide Date, you cannot provide Days. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A date on which to transition objects to the specified storage class. If you provide Date, you cannot provide Days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let date: String? /// The number of days after which to transition the object to the specified storage class. If you provide Days, you cannot provide Date. public let days: Int? @@ -17347,9 +17213,7 @@ extension SecurityHub { public let bucketVersioningConfiguration: AwsS3BucketBucketVersioningConfiguration? /// The website configuration parameters for the S3 bucket. public let bucketWebsiteConfiguration: AwsS3BucketWebsiteConfiguration? - /// Indicates when the S3 bucket was created. This field accepts only the specified formats. 
Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the S3 bucket was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdAt: String? /// The name of the bucket. public let name: String? @@ -17832,9 +17696,7 @@ extension SecurityHub { public let contentType: String? /// The opaque identifier assigned by a web server to a specific version of a resource found at a URL. public let eTag: String? - /// Indicates when the object was last modified. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the object was last modified. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastModified: String? 
/// If the object is stored using server-side encryption, the value of the server-side encryption algorithm used when storing this object in Amazon S3. public let serverSideEncryption: String? @@ -17875,11 +17737,11 @@ extension SecurityHub { public struct AwsSageMakerNotebookInstanceDetails: AWSEncodableShape & AWSDecodableShape { /// A list of Amazon Elastic Inference instance types to associate with the notebook instance. Currently, only one instance type can be associated with a notebook instance. public let acceleratorTypes: [String]? - /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git repositories with SageMaker notebook instances in the Amazon SageMaker Developer Guide. + /// An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git repositories with SageMaker AI notebook instances in the Amazon SageMaker AI Developer Guide. public let additionalCodeRepositories: [String]? - /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. 
For more information, see Associating Git repositories with SageMaker notebook instances in the Amazon SageMaker Developer Guide. + /// The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git repositories with SageMaker AI notebook instances in the Amazon SageMaker AI Developer Guide. public let defaultCodeRepository: String? - /// Sets whether SageMaker provides internet access to the notebook instance. If you set this to Disabled, this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a Network Address Translation (NAT) Gateway in your VPC. + /// Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to Disabled, this notebook instance is able to access resources only in your VPC, and is not able to connect to SageMaker AI training and endpoint services unless you configure a Network Address Translation (NAT) Gateway in your VPC. public let directInternetAccess: String? /// If status of the instance is Failed, the reason it failed. public let failureReason: String? @@ -17887,9 +17749,9 @@ extension SecurityHub { public let instanceMetadataServiceConfiguration: AwsSageMakerNotebookInstanceMetadataServiceConfigurationDetails? /// The type of machine learning (ML) compute instance to launch for the notebook instance. public let instanceType: String? - /// The Amazon Resource Name (ARN) of an Key Management Service (KMS) key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. 
For information, see Enabling and disabling keys in the Key Management Service Developer Guide. + /// The Amazon Resource Name (ARN) of a Key Management Service (KMS) key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and disabling keys in the Key Management Service Developer Guide. public let kmsKeyId: String? - /// The network interface ID that SageMaker created when the instance was created. + /// The network interface ID that SageMaker AI created when the instance was created. public let networkInterfaceId: String? /// The Amazon Resource Name (ARN) of the notebook instance. public let notebookInstanceArn: String? @@ -18084,22 +17946,18 @@ extension SecurityHub { public let compliance: Compliance? /// A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence. public let confidence: Int? - /// Indicates when the security findings provider created the potential security issue that a finding captured. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the security findings provider created the potential security issue that a finding captured. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdAt: String? /// The level of importance assigned to the resources associated with the finding. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. public let criticality: Int? /// A finding's description. Description is a required property. Length Constraints: Minimum length of 1. Maximum length of 1024. public let description: String? /// Provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you - /// must have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide. + /// must have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide. public let detection: Detection? /// In a BatchImportFindings request, finding providers use FindingProviderFields to provide and update their own values for confidence, criticality, related findings, severity, and types. public let findingProviderFields: FindingProviderFields? 
- /// Indicates when the security findings provider first observed the potential security issue that a finding captured. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the security findings provider first observed the potential security issue that a finding captured. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let firstObservedAt: String? /// Provides metadata for the Amazon CodeGuru detector associated with a finding. This field pertains to /// findings that relate to Lambda functions. Amazon Inspector identifies policy violations and @@ -18110,9 +17968,7 @@ extension SecurityHub { public let generatorId: String? /// The security findings provider-specific identifier for a finding. Length Constraints: Minimum length of 1. Maximum length of 512. public let id: String? - /// Indicates when the security findings provider most recently observed the potential security issue that a finding captured. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the security findings provider most recently observed a change in the resource that is involved in the finding. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastObservedAt: String? /// A list of malware related to a finding. Array Members: Maximum number of 5 items. public let malware: [Malware]? @@ -18126,9 +17982,7 @@ extension SecurityHub { public let patchSummary: PatchSummary? /// The details of process-related information about a finding. public let process: ProcessDetails? - /// A timestamp that indicates when Security Hub received a finding and begins to process it. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when Security Hub received a finding and begins to process it. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let processedAt: String? 
/// The ARN generated by Security Hub that uniquely identifies a product that generates findings. This can be the ARN for a third-party product that is integrated with Security Hub, or the ARN for a custom integration. Length Constraints: Minimum length of 12. Maximum length of 2048. public let productArn: String? @@ -18162,9 +18016,7 @@ extension SecurityHub { public let title: String? /// One or more finding types in the format of namespace/category/classifier that classify a finding. Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications Array Members: Maximum number of 50 items. public let types: [String]? - /// Indicates when the security findings provider last updated the finding record. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the security findings provider last updated the finding record. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let updatedAt: String? /// A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding. Can contain up to 50 key-value pairs. For each key-value pair, the key can contain up to 128 characters, and the value can contain up to 1024 characters. public let userDefinedFields: [String: String]? 
@@ -18357,9 +18209,7 @@ extension SecurityHub { public let complianceStatus: [StringFilter]? /// A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence. public let confidence: [NumberFilter]? - /// A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let createdAt: [DateFilter]? /// The level of importance assigned to the resources associated with the finding. A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. public let criticality: [NumberFilter]? @@ -18379,9 +18229,7 @@ extension SecurityHub { public let findingProviderFieldsSeverityOriginal: [StringFilter]? /// One or more finding types that the finding provider assigned to the finding. Uses the format of namespace/category/classifier that classify a finding. 
Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications public let findingProviderFieldsTypes: [StringFilter]? - /// A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let firstObservedAt: [DateFilter]? /// The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc. public let generatorId: [StringFilter]? @@ -18389,9 +18237,7 @@ extension SecurityHub { public let id: [StringFilter]? /// A keyword for a finding. public let keyword: [KeywordFilter]? - /// A timestamp that indicates when the security findings provider most recently observed the potential security issue that a finding captured. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. 
The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastObservedAt: [DateFilter]? /// The name of the malware that was observed. public let malwareName: [StringFilter]? @@ -18429,9 +18275,7 @@ extension SecurityHub { public let noteUpdatedAt: [DateFilter]? /// The principal that created a note. public let noteUpdatedBy: [StringFilter]? - /// A timestamp that identifies when the process was launched. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that identifies when the process was launched. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let processLaunchedAt: [DateFilter]? 
/// The name of the process. public let processName: [StringFilter]? @@ -18441,9 +18285,7 @@ extension SecurityHub { public let processPath: [StringFilter]? /// The process ID. public let processPid: [NumberFilter]? - /// A timestamp that identifies when the process was terminated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that identifies when the process was terminated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let processTerminatedAt: [DateFilter]? /// The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) after this provider's product (solution that generates findings) is registered with Security Hub. public let productArn: [StringFilter]? @@ -18501,9 +18343,7 @@ extension SecurityHub { public let resourceContainerImageId: [StringFilter]? /// The name of the image related to a finding. public let resourceContainerImageName: [StringFilter]? - /// A timestamp that identifies when the container was started. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that identifies when the container was started. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let resourceContainerLaunchedAt: [DateFilter]? /// The name of the container related to a finding. public let resourceContainerName: [StringFilter]? @@ -18531,7 +18371,7 @@ extension SecurityHub { public let sourceUrl: [StringFilter]? /// The category of a threat intelligence indicator. public let threatIntelIndicatorCategory: [StringFilter]? - /// A timestamp that identifies the last observation of a threat intelligence indicator. + /// A timestamp that identifies the last observation of a threat intelligence indicator. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let threatIntelIndicatorLastObservedAt: [DateFilter]? /// The source of the threat intelligence. public let threatIntelIndicatorSource: [StringFilter]? @@ -18545,9 +18385,7 @@ extension SecurityHub { public let title: [StringFilter]? /// A finding type in the format of namespace/category/classifier that classifies a finding. public let type: [StringFilter]? - /// A timestamp that indicates when the security findings provider last updated the finding record. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the security findings provider last updated the finding record. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let updatedAt: [DateFilter]? /// A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding. public let userDefinedFields: [MapFilter]? @@ -21519,9 +21357,7 @@ extension SecurityHub { public let imageId: String? /// The name of the container image related to a finding. public let imageName: String? - /// Indicates when the container started. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the container started. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let launchedAt: String? /// The name of the container related to a finding. public let name: String? 
@@ -22036,13 +21872,9 @@ extension SecurityHub { public struct DateFilter: AWSEncodableShape & AWSDecodableShape { /// A date range for the date filter. public let dateRange: DateRange? - /// A timestamp that provides the end date for the date filter. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that provides the end date for the date filter. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let end: String? - /// A timestamp that provides the start date for the date filter. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that provides the start date for the date filter. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let start: String? 
@inlinable @@ -22379,7 +22211,7 @@ extension SecurityHub { } public struct DescribeHubResponse: AWSDecodableShape { - /// Whether to automatically enable new controls when they are added to standards that are enabled. If set to true, then new controls for enabled standards are enabled automatically. If set to false, then new controls are not enabled. + /// Whether to automatically enable new controls when they are added to standards that are enabled. If set to true, then new controls for enabled standards are enabled automatically. If set to false, then new controls are not enabled. When you automatically enable new controls, you can interact with the controls in the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of DISABLED. It can take up to several days for Security Hub to process the control release and designate the control as ENABLED in your account. During the processing period, you can manually enable or disable a control, and Security Hub will maintain that designation regardless of whether you have AutoEnableControls set to true. public let autoEnableControls: Bool? /// Specifies whether the calling account has consolidated control findings turned on. If the value for this field is set to SECURITY_CONTROL, Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards. If the value for this field is set to STANDARD_CONTROL, Security Hub generates separate findings for a control check when the check applies to multiple enabled standards. The value for this field in a member account matches the value in the administrator account. For accounts that aren't part of an organization, the default value of this field is SECURITY_CONTROL if you enabled Security Hub on or after February 23, 2023. public let controlFindingGenerator: ControlFindingGenerator? 
@@ -22933,9 +22765,7 @@ extension SecurityHub { public let updates: [FindingHistoryUpdate]? /// Identifies the source of the event that changed the finding. For example, an integrated Amazon Web Services service or third-party partner integration may call BatchImportFindings , or an Security Hub customer may call BatchUpdateFindings . public let updateSource: FindingHistoryUpdateSource? - /// A timestamp that indicates when Security Hub processed the updated finding record. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when Security Hub processed the updated finding record. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. @OptionalCustomCoding public var updateTime: Date? @@ -23459,9 +23289,7 @@ extension SecurityHub { } public struct GetFindingHistoryRequest: AWSEncodableShape { - /// An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. 
If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// An ISO 8601-formatted timestamp that indicates the end time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. 
@OptionalCustomCoding public var endTime: Date? public let findingIdentifier: AwsSecurityFindingIdentifier? @@ -23469,9 +23297,7 @@ extension SecurityHub { public let maxResults: Int? /// A token for pagination purposes. Provide NULL as the initial value. In subsequent requests, provide the token included in the response to get up to an additional 100 results of finding history. If you don’t provide NextToken, Security Hub returns up to 100 results of finding history for each request. public let nextToken: String? - /// A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates the start time of the requested finding history. If you provide values for both StartTime and EndTime, Security Hub returns finding history for the specified time period. If you provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at which the API is called. If you provide a value for EndTime but not for StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you provide neither StartTime nor EndTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the time at which the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is limited to 90 days. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. @OptionalCustomCoding public var startTime: Date? @@ -25055,9 +24881,7 @@ extension SecurityHub { public struct Note: AWSEncodableShape & AWSDecodableShape { /// The text of a note. Length Constraints: Minimum of 1. Maximum of 512. public let text: String? - /// A timestamp that indicates when the note was updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// A timestamp that indicates when the note was updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let updatedAt: String? /// The principal that created a note. public let updatedBy: String? @@ -25279,13 +25103,9 @@ extension SecurityHub { public let missingCount: Int? /// The type of patch operation performed. For Patch Manager, the values are SCAN and INSTALL. Length Constraints: Minimum length of 1. Maximum length of 256. public let operation: String? - /// Indicates when the operation completed. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the operation completed. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let operationEndTime: String? - /// Indicates when the operation started. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. 
The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the operation started. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let operationStartTime: String? /// The reboot option specified for the instance. Length Constraints: Minimum length of 1. Maximum length of 256. public let rebootOption: String? @@ -25417,9 +25237,7 @@ extension SecurityHub { } public struct ProcessDetails: AWSEncodableShape & AWSDecodableShape { - /// Indicates when the process was launched. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the process was launched. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let launchedAt: String? /// The name of the process. Length Constraints: Minimum of 1. Maximum of 64. public let name: String? 
@@ -25429,9 +25247,7 @@ extension SecurityHub { public let path: String? /// The process ID. public let pid: Int? - /// Indicates when the process was terminated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the process was terminated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let terminatedAt: String? @inlinable @@ -27209,7 +27025,7 @@ extension SecurityHub { public let productArn: String? /// The ARN or ID of the Amazon Web Services resource associated with the signal. public let resourceIds: [String]? - /// The severity associated with the signal. For more information about severity, see Findings severity levels in the Amazon GuardDuty User Guide. + /// The severity associated with the signal. For more information about severity, see Severity levels for GuardDuty findings in the Amazon GuardDuty User Guide. public let severity: Double? /// Contains information about the indicators associated with the signals in this attack sequence finding. The values for SignalIndicators are a subset of the values for SequenceIndicators, but the values for these fields don't always match 1:1. public let signalIndicators: [Indicator]? @@ -28010,9 +27826,7 @@ extension SecurityHub { public struct ThreatIntelIndicator: AWSEncodableShape & AWSDecodableShape { /// The category of a threat intelligence indicator. 
public let category: ThreatIntelIndicatorCategory? - /// Indicates when the most recent instance of a threat intelligence indicator was observed. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the most recent instance of a threat intelligence indicator was observed. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let lastObservedAt: String? /// The source of the threat intelligence indicator. Length Constraints: Minimum of 1 length. Maximum of 64 length. public let source: String? @@ -28577,7 +28391,7 @@ extension SecurityHub { } public struct UpdateSecurityHubConfigurationRequest: AWSEncodableShape { - /// Whether to automatically enable new controls when they are added to standards that are enabled. By default, this is set to true, and new controls are enabled automatically. To not automatically enable new controls, set this to false. + /// Whether to automatically enable new controls when they are added to standards that are enabled. By default, this is set to true, and new controls are enabled automatically. To not automatically enable new controls, set this to false. When you automatically enable new controls, you can interact with the controls in the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of DISABLED. 
It can take up to several days for Security Hub to process the control release and designate the control as ENABLED in your account. During the processing period, you can manually enable or disable a control, and Security Hub will maintain that designation regardless of whether you have AutoEnableControls set to true. public let autoEnableControls: Bool? /// Updates whether the calling account has consolidated control findings turned on. If the value for this field is set to SECURITY_CONTROL, Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards. If the value for this field is set to STANDARD_CONTROL, Security Hub generates separate findings for a control check when the check applies to multiple enabled standards. For accounts that are part of an organization, this value can only be updated in the administrator account. public let controlFindingGenerator: ControlFindingGenerator? @@ -28856,15 +28670,11 @@ extension SecurityHub { public let name: String? /// The URL of the vulnerability advisory. public let url: String? - /// Indicates when the vulnerability advisory was created. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the vulnerability advisory was created. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let vendorCreatedAt: String? 
/// The severity that the vendor assigned to the vulnerability. public let vendorSeverity: String? - /// Indicates when the vulnerability advisory was last updated. This field accepts only the specified formats. Timestamps - /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited - /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) + /// Indicates when the vulnerability advisory was last updated. For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps. public let vendorUpdatedAt: String? @inlinable diff --git a/Sources/Soto/Services/SsmSap/SsmSap_api.swift b/Sources/Soto/Services/SsmSap/SsmSap_api.swift index 5f96af885d..90f80d1dc4 100644 --- a/Sources/Soto/Services/SsmSap/SsmSap_api.swift +++ b/Sources/Soto/Services/SsmSap/SsmSap_api.swift @@ -586,6 +586,7 @@ public struct SsmSap: AWSService { /// Parameters: /// - applicationId: The ID of the application. /// - applicationType: The type of the application. + /// - componentsInfo: This is an optional parameter for component details to which the SAP ABAP application is attached, such as Web Dispatcher. This is an array of ApplicationComponent objects. You may input 0 to 5 items. /// - credentials: The credentials of the SAP application. /// - databaseArn: The Amazon Resource Name of the SAP HANA database. /// - instances: The Amazon EC2 instances on which your SAP application is running. 
@@ -597,6 +598,7 @@ public struct SsmSap: AWSService { public func registerApplication( applicationId: String, applicationType: ApplicationType, + componentsInfo: [ComponentInfo]? = nil, credentials: [ApplicationCredential]? = nil, databaseArn: String? = nil, instances: [String], @@ -608,6 +610,7 @@ public struct SsmSap: AWSService { let input = RegisterApplicationInput( applicationId: applicationId, applicationType: applicationType, + componentsInfo: componentsInfo, credentials: credentials, databaseArn: databaseArn, instances: instances, diff --git a/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift b/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift index 4e20d98a8b..6f6fb0a39e 100644 --- a/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift +++ b/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift @@ -451,6 +451,33 @@ extension SsmSap { } } + public struct ComponentInfo: AWSEncodableShape { + /// This string is the type of the component. Accepted value is WD. + public let componentType: ComponentType + /// This is the Amazon EC2 instance on which your SAP component is running. Accepted values are alphanumeric. + public let ec2InstanceId: String + /// This string is the SAP System ID of the component. Accepted values are alphanumeric. + public let sid: String + + @inlinable + public init(componentType: ComponentType, ec2InstanceId: String, sid: String) { + self.componentType = componentType + self.ec2InstanceId = ec2InstanceId + self.sid = sid + } + + public func validate(name: String) throws { + try self.validate(self.ec2InstanceId, name: "ec2InstanceId", parent: name, pattern: "^i-[\\w\\d]{8}$|^i-[\\w\\d]{17}$") + try self.validate(self.sid, name: "sid", parent: name, pattern: "^[A-Z][A-Z0-9]{2}$") + } + + private enum CodingKeys: String, CodingKey { + case componentType = "ComponentType" + case ec2InstanceId = "Ec2InstanceId" + case sid = "Sid" + } + } + public struct ComponentSummary: AWSDecodableShape { /// The ID of the application. 
public let applicationId: String? @@ -1392,6 +1419,8 @@ extension SsmSap { public let applicationId: String /// The type of the application. public let applicationType: ApplicationType + /// This is an optional parameter for component details to which the SAP ABAP application is attached, such as Web Dispatcher. This is an array of ApplicationComponent objects. You may input 0 to 5 items. + public let componentsInfo: [ComponentInfo]? /// The credentials of the SAP application. public let credentials: [ApplicationCredential]? /// The Amazon Resource Name of the SAP HANA database. @@ -1406,9 +1435,10 @@ extension SsmSap { public let tags: [String: String]? @inlinable - public init(applicationId: String, applicationType: ApplicationType, credentials: [ApplicationCredential]? = nil, databaseArn: String? = nil, instances: [String], sapInstanceNumber: String? = nil, sid: String? = nil, tags: [String: String]? = nil) { + public init(applicationId: String, applicationType: ApplicationType, componentsInfo: [ComponentInfo]? = nil, credentials: [ApplicationCredential]? = nil, databaseArn: String? = nil, instances: [String], sapInstanceNumber: String? = nil, sid: String? = nil, tags: [String: String]? 
= nil) { self.applicationId = applicationId self.applicationType = applicationType + self.componentsInfo = componentsInfo self.credentials = credentials self.databaseArn = databaseArn self.instances = instances @@ -1421,6 +1451,10 @@ extension SsmSap { try self.validate(self.applicationId, name: "applicationId", parent: name, max: 60) try self.validate(self.applicationId, name: "applicationId", parent: name, min: 1) try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^[\\w\\d\\.-]+$") + try self.componentsInfo?.forEach { + try $0.validate(name: "\(name).componentsInfo[]") + } + try self.validate(self.componentsInfo, name: "componentsInfo", parent: name, max: 5) try self.credentials?.forEach { try $0.validate(name: "\(name).credentials[]") } @@ -1443,6 +1477,7 @@ extension SsmSap { private enum CodingKeys: String, CodingKey { case applicationId = "ApplicationId" case applicationType = "ApplicationType" + case componentsInfo = "ComponentsInfo" case credentials = "Credentials" case databaseArn = "DatabaseArn" case instances = "Instances" diff --git a/Sources/Soto/Services/Transfer/Transfer_api.swift b/Sources/Soto/Services/Transfer/Transfer_api.swift index 2fb1c33cd4..f9a86fae42 100644 --- a/Sources/Soto/Services/Transfer/Transfer_api.swift +++ b/Sources/Soto/Services/Transfer/Transfer_api.swift @@ -163,8 +163,10 @@ public struct Transfer: AWSService { /// - accessRole: Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use. For AS2 connectors With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. 
We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer. If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key. For SFTP connectors Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager. /// - baseDirectory: The landing directory (folder) for files transferred by using the AS2 protocol. A BaseDirectory example is /amzn-s3-demo-bucket/home/mydirectory. /// - description: A name or short description to identify the agreement. + /// - enforceMessageSigning: Determines whether or not unsigned messages from your trading partners will be accepted. ENABLED: Transfer Family rejects unsigned messages from your trading partner. DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner. /// - localProfileId: A unique identifier for the AS2 local profile. /// - partnerProfileId: A unique identifier for the partner profile used in the agreement. 
+ /// - preserveFilename: Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it. ENABLED: the filename provided by your trading parter is preserved when the file is saved. DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations. /// - serverId: A system-assigned unique identifier for a server instance. This is the specific server that the agreement uses. /// - status: The status of the agreement. The agreement can be either ACTIVE or INACTIVE. /// - tags: Key-value pairs that can be used to group and search for agreements. @@ -174,8 +176,10 @@ public struct Transfer: AWSService { accessRole: String, baseDirectory: String, description: String? = nil, + enforceMessageSigning: EnforceMessageSigningType? = nil, localProfileId: String, partnerProfileId: String, + preserveFilename: PreserveFilenameType? = nil, serverId: String, status: AgreementStatusType? = nil, tags: [Tag]? = nil, @@ -185,8 +189,10 @@ public struct Transfer: AWSService { accessRole: accessRole, baseDirectory: baseDirectory, description: description, + enforceMessageSigning: enforceMessageSigning, localProfileId: localProfileId, partnerProfileId: partnerProfileId, + preserveFilename: preserveFilename, serverId: serverId, status: status, tags: tags @@ -2223,8 +2229,10 @@ public struct Transfer: AWSService { /// - agreementId: A unique identifier for the agreement. This identifier is returned when you create an agreement. /// - baseDirectory: To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /amzn-s3-demo-bucket/home/mydirectory . /// - description: To replace the existing description, provide a short description for the agreement. + /// - enforceMessageSigning: Determines whether or not unsigned messages from your trading partners will be accepted. 
ENABLED: Transfer Family rejects unsigned messages from your trading partner. DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner. /// - localProfileId: A unique identifier for the AS2 local profile. To change the local profile identifier, provide a new value here. /// - partnerProfileId: A unique identifier for the partner profile. To change the partner profile identifier, provide a new value here. + /// - preserveFilename: Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it. ENABLED: the filename provided by your trading parter is preserved when the file is saved. DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations. /// - serverId: A system-assigned unique identifier for a server instance. This is the specific server that the agreement uses. /// - status: You can update the status for the agreement, either activating an inactive agreement or the reverse. /// - logger: Logger use during operation @@ -2234,8 +2242,10 @@ public struct Transfer: AWSService { agreementId: String, baseDirectory: String? = nil, description: String? = nil, + enforceMessageSigning: EnforceMessageSigningType? = nil, localProfileId: String? = nil, partnerProfileId: String? = nil, + preserveFilename: PreserveFilenameType? = nil, serverId: String, status: AgreementStatusType? 
= nil, logger: Logger = AWSClient.loggingDisabled @@ -2245,8 +2255,10 @@ public struct Transfer: AWSService { agreementId: agreementId, baseDirectory: baseDirectory, description: description, + enforceMessageSigning: enforceMessageSigning, localProfileId: localProfileId, partnerProfileId: partnerProfileId, + preserveFilename: preserveFilename, serverId: serverId, status: status ) diff --git a/Sources/Soto/Services/Transfer/Transfer_shapes.swift b/Sources/Soto/Services/Transfer/Transfer_shapes.swift index 384dc042a4..06c530572a 100644 --- a/Sources/Soto/Services/Transfer/Transfer_shapes.swift +++ b/Sources/Soto/Services/Transfer/Transfer_shapes.swift @@ -102,6 +102,12 @@ extension Transfer { public var description: String { return self.rawValue } } + public enum EnforceMessageSigningType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum ExecutionErrorType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case alreadyExists = "ALREADY_EXISTS" case badRequest = "BAD_REQUEST" @@ -164,6 +170,18 @@ extension Transfer { public var description: String { return self.rawValue } } + public enum PreserveContentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + + public enum PreserveFilenameType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum ProfileType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case local = "LOCAL" case partner = "PARTNER" @@ -266,11 +284,13 @@ extension Transfer { public let messageSubject: String? 
/// A unique identifier for the partner profile for the connector. public let partnerProfileId: String? + /// Allows you to use the Amazon S3 Content-Type that is associated with objects in S3 instead of having the content type mapped based on the file extension. This parameter is enabled by default when you create an AS2 connector from the console, but disabled by default when you create an AS2 connector by calling the API directly. + public let preserveContentType: PreserveContentType? /// The algorithm that is used to sign the AS2 messages sent with the connector. public let signingAlgorithm: SigningAlg? @inlinable - public init(basicAuthSecretId: String? = nil, compression: CompressionEnum? = nil, encryptionAlgorithm: EncryptionAlg? = nil, localProfileId: String? = nil, mdnResponse: MdnResponse? = nil, mdnSigningAlgorithm: MdnSigningAlg? = nil, messageSubject: String? = nil, partnerProfileId: String? = nil, signingAlgorithm: SigningAlg? = nil) { + public init(basicAuthSecretId: String? = nil, compression: CompressionEnum? = nil, encryptionAlgorithm: EncryptionAlg? = nil, localProfileId: String? = nil, mdnResponse: MdnResponse? = nil, mdnSigningAlgorithm: MdnSigningAlg? = nil, messageSubject: String? = nil, partnerProfileId: String? = nil, preserveContentType: PreserveContentType? = nil, signingAlgorithm: SigningAlg? 
= nil) { self.basicAuthSecretId = basicAuthSecretId self.compression = compression self.encryptionAlgorithm = encryptionAlgorithm @@ -279,6 +299,7 @@ extension Transfer { self.mdnSigningAlgorithm = mdnSigningAlgorithm self.messageSubject = messageSubject self.partnerProfileId = partnerProfileId + self.preserveContentType = preserveContentType self.signingAlgorithm = signingAlgorithm } @@ -304,6 +325,7 @@ extension Transfer { case mdnSigningAlgorithm = "MdnSigningAlgorithm" case messageSubject = "MessageSubject" case partnerProfileId = "PartnerProfileId" + case preserveContentType = "PreserveContentType" case signingAlgorithm = "SigningAlgorithm" } } @@ -455,10 +477,14 @@ extension Transfer { public let baseDirectory: String /// A name or short description to identify the agreement. public let description: String? + /// Determines whether or not unsigned messages from your trading partners will be accepted. ENABLED: Transfer Family rejects unsigned messages from your trading partner. DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner. + public let enforceMessageSigning: EnforceMessageSigningType? /// A unique identifier for the AS2 local profile. public let localProfileId: String /// A unique identifier for the partner profile used in the agreement. public let partnerProfileId: String + /// Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it. ENABLED: the filename provided by your trading parter is preserved when the file is saved. DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations. + public let preserveFilename: PreserveFilenameType? /// A system-assigned unique identifier for a server instance. This is the specific server that the agreement uses. public let serverId: String /// The status of the agreement. The agreement can be either ACTIVE or INACTIVE. 
@@ -467,12 +493,14 @@ extension Transfer { public let tags: [Tag]? @inlinable - public init(accessRole: String, baseDirectory: String, description: String? = nil, localProfileId: String, partnerProfileId: String, serverId: String, status: AgreementStatusType? = nil, tags: [Tag]? = nil) { + public init(accessRole: String, baseDirectory: String, description: String? = nil, enforceMessageSigning: EnforceMessageSigningType? = nil, localProfileId: String, partnerProfileId: String, preserveFilename: PreserveFilenameType? = nil, serverId: String, status: AgreementStatusType? = nil, tags: [Tag]? = nil) { self.accessRole = accessRole self.baseDirectory = baseDirectory self.description = description + self.enforceMessageSigning = enforceMessageSigning self.localProfileId = localProfileId self.partnerProfileId = partnerProfileId + self.preserveFilename = preserveFilename self.serverId = serverId self.status = status self.tags = tags @@ -507,8 +535,10 @@ extension Transfer { case accessRole = "AccessRole" case baseDirectory = "BaseDirectory" case description = "Description" + case enforceMessageSigning = "EnforceMessageSigning" case localProfileId = "LocalProfileId" case partnerProfileId = "PartnerProfileId" + case preserveFilename = "PreserveFilename" case serverId = "ServerId" case status = "Status" case tags = "Tags" @@ -1902,10 +1932,14 @@ extension Transfer { public let baseDirectory: String? /// The name or short description that's used to identify the agreement. public let description: String? + /// Determines whether or not unsigned messages from your trading partners will be accepted. ENABLED: Transfer Family rejects unsigned messages from your trading partner. DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner. + public let enforceMessageSigning: EnforceMessageSigningType? /// A unique identifier for the AS2 local profile. public let localProfileId: String? 
/// A unique identifier for the partner profile used in the agreement. public let partnerProfileId: String? + /// Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it. ENABLED: the filename provided by your trading parter is preserved when the file is saved. DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations. + public let preserveFilename: PreserveFilenameType? /// A system-assigned unique identifier for a server instance. This identifier indicates the specific server that the agreement uses. public let serverId: String? /// The current status of the agreement, either ACTIVE or INACTIVE. @@ -1914,14 +1948,16 @@ extension Transfer { public let tags: [Tag]? @inlinable - public init(accessRole: String? = nil, agreementId: String? = nil, arn: String, baseDirectory: String? = nil, description: String? = nil, localProfileId: String? = nil, partnerProfileId: String? = nil, serverId: String? = nil, status: AgreementStatusType? = nil, tags: [Tag]? = nil) { + public init(accessRole: String? = nil, agreementId: String? = nil, arn: String, baseDirectory: String? = nil, description: String? = nil, enforceMessageSigning: EnforceMessageSigningType? = nil, localProfileId: String? = nil, partnerProfileId: String? = nil, preserveFilename: PreserveFilenameType? = nil, serverId: String? = nil, status: AgreementStatusType? = nil, tags: [Tag]? 
= nil) { self.accessRole = accessRole self.agreementId = agreementId self.arn = arn self.baseDirectory = baseDirectory self.description = description + self.enforceMessageSigning = enforceMessageSigning self.localProfileId = localProfileId self.partnerProfileId = partnerProfileId + self.preserveFilename = preserveFilename self.serverId = serverId self.status = status self.tags = tags @@ -1933,8 +1969,10 @@ extension Transfer { case arn = "Arn" case baseDirectory = "BaseDirectory" case description = "Description" + case enforceMessageSigning = "EnforceMessageSigning" case localProfileId = "LocalProfileId" case partnerProfileId = "PartnerProfileId" + case preserveFilename = "PreserveFilename" case serverId = "ServerId" case status = "Status" case tags = "Tags" @@ -1962,7 +2000,7 @@ extension Transfer { public let notBeforeDate: Date? /// The serial number for the certificate. public let serial: String? - /// The certificate can be either ACTIVE, PENDING_ROTATION, or INACTIVE. PENDING_ROTATION means that this certificate will replace the current certificate when it expires. + /// Currently, the only available status is ACTIVE: all other values are reserved for future use. public let status: CertificateStatusType? /// Key-value pairs that can be used to group and search for certificates. public let tags: [Tag]? @@ -4727,23 +4765,29 @@ extension Transfer { public let baseDirectory: String? /// To replace the existing description, provide a short description for the agreement. public let description: String? + /// Determines whether or not unsigned messages from your trading partners will be accepted. ENABLED: Transfer Family rejects unsigned messages from your trading partner. DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner. + public let enforceMessageSigning: EnforceMessageSigningType? /// A unique identifier for the AS2 local profile. To change the local profile identifier, provide a new value here. 
public let localProfileId: String? /// A unique identifier for the partner profile. To change the partner profile identifier, provide a new value here. public let partnerProfileId: String? + /// Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it. ENABLED: the filename provided by your trading parter is preserved when the file is saved. DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations. + public let preserveFilename: PreserveFilenameType? /// A system-assigned unique identifier for a server instance. This is the specific server that the agreement uses. public let serverId: String /// You can update the status for the agreement, either activating an inactive agreement or the reverse. public let status: AgreementStatusType? @inlinable - public init(accessRole: String? = nil, agreementId: String, baseDirectory: String? = nil, description: String? = nil, localProfileId: String? = nil, partnerProfileId: String? = nil, serverId: String, status: AgreementStatusType? = nil) { + public init(accessRole: String? = nil, agreementId: String, baseDirectory: String? = nil, description: String? = nil, enforceMessageSigning: EnforceMessageSigningType? = nil, localProfileId: String? = nil, partnerProfileId: String? = nil, preserveFilename: PreserveFilenameType? = nil, serverId: String, status: AgreementStatusType? 
= nil) { self.accessRole = accessRole self.agreementId = agreementId self.baseDirectory = baseDirectory self.description = description + self.enforceMessageSigning = enforceMessageSigning self.localProfileId = localProfileId self.partnerProfileId = partnerProfileId + self.preserveFilename = preserveFilename self.serverId = serverId self.status = status } @@ -4776,8 +4820,10 @@ extension Transfer { case agreementId = "AgreementId" case baseDirectory = "BaseDirectory" case description = "Description" + case enforceMessageSigning = "EnforceMessageSigning" case localProfileId = "LocalProfileId" case partnerProfileId = "PartnerProfileId" + case preserveFilename = "PreserveFilename" case serverId = "ServerId" case status = "Status" } diff --git a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift index 9130888b89..3e8731b613 100644 --- a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift +++ b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift @@ -26,6 +26,32 @@ import Foundation extension WorkSpaces { // MARK: Enums + public enum AGAModeForDirectoryEnum: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabledAuto = "ENABLED_AUTO" + public var description: String { return self.rawValue } + } + + public enum AGAModeForWorkSpaceEnum: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabledAuto = "ENABLED_AUTO" + case inherited = "INHERITED" + public var description: String { return self.rawValue } + } + + public enum AGAPreferredProtocolForDirectory: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case none = "NONE" + case tcp = "TCP" + public var description: String { return self.rawValue } + } + + public enum AGAPreferredProtocolForWorkSpace: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case inherited = 
"INHERITED" + case none = "NONE" + case tcp = "TCP" + public var description: String { return self.rawValue } + } + public enum AccessPropertyValue: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case allow = "ALLOW" case deny = "DENY" @@ -3596,6 +3622,42 @@ extension WorkSpaces { } } + public struct GlobalAcceleratorForDirectory: AWSEncodableShape & AWSDecodableShape { + /// Indicates if Global Accelerator for directory is enabled or disabled. + public let mode: AGAModeForDirectoryEnum + /// Indicates the preferred protocol for Global Accelerator. + public let preferredProtocol: AGAPreferredProtocolForDirectory? + + @inlinable + public init(mode: AGAModeForDirectoryEnum, preferredProtocol: AGAPreferredProtocolForDirectory? = nil) { + self.mode = mode + self.preferredProtocol = preferredProtocol + } + + private enum CodingKeys: String, CodingKey { + case mode = "Mode" + case preferredProtocol = "PreferredProtocol" + } + } + + public struct GlobalAcceleratorForWorkSpace: AWSEncodableShape & AWSDecodableShape { + /// Indicates if Global Accelerator for WorkSpaces is enabled, disabled, or the same mode as the associated directory. + public let mode: AGAModeForWorkSpaceEnum + /// Indicates the preferred protocol for Global Accelerator. + public let preferredProtocol: AGAPreferredProtocolForWorkSpace? + + @inlinable + public init(mode: AGAModeForWorkSpaceEnum, preferredProtocol: AGAPreferredProtocolForWorkSpace? = nil) { + self.mode = mode + self.preferredProtocol = preferredProtocol + } + + private enum CodingKeys: String, CodingKey { + case mode = "Mode" + case preferredProtocol = "PreferredProtocol" + } + } + public struct IDCConfig: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the application. public let applicationArn: String? @@ -5098,6 +5160,8 @@ extension WorkSpaces { } public struct StreamingProperties: AWSEncodableShape & AWSDecodableShape { + /// Indicates the Global Accelerator properties. 
+ public let globalAccelerator: GlobalAcceleratorForDirectory? /// Indicates the storage connector used public let storageConnectors: [StorageConnector]? /// Indicates the type of preferred protocol for the streaming experience. @@ -5106,7 +5170,8 @@ extension WorkSpaces { public let userSettings: [UserSetting]? @inlinable - public init(storageConnectors: [StorageConnector]? = nil, streamingExperiencePreferredProtocol: StreamingExperiencePreferredProtocolEnum? = nil, userSettings: [UserSetting]? = nil) { + public init(globalAccelerator: GlobalAcceleratorForDirectory? = nil, storageConnectors: [StorageConnector]? = nil, streamingExperiencePreferredProtocol: StreamingExperiencePreferredProtocolEnum? = nil, userSettings: [UserSetting]? = nil) { + self.globalAccelerator = globalAccelerator self.storageConnectors = storageConnectors self.streamingExperiencePreferredProtocol = streamingExperiencePreferredProtocol self.userSettings = userSettings @@ -5121,6 +5186,7 @@ extension WorkSpaces { } private enum CodingKeys: String, CodingKey { + case globalAccelerator = "GlobalAccelerator" case storageConnectors = "StorageConnectors" case streamingExperiencePreferredProtocol = "StreamingExperiencePreferredProtocol" case userSettings = "UserSettings" @@ -6064,6 +6130,8 @@ extension WorkSpaces { public struct WorkspaceProperties: AWSEncodableShape & AWSDecodableShape { /// The compute type. For more information, see Amazon WorkSpaces Bundles. public let computeTypeName: Compute? + /// Indicates the Global Accelerator properties. + public let globalAccelerator: GlobalAcceleratorForWorkSpace? /// The name of the operating system. public let operatingSystemName: OperatingSystemName? /// The protocol. For more information, see Protocols for Amazon WorkSpaces. Only available for WorkSpaces created with PCoIP bundles. The Protocols property is case sensitive. Ensure you use PCOIP or DCV (formerly WSP). 
Unavailable for Windows 7 WorkSpaces and WorkSpaces using GPU-based bundles (Graphics, GraphicsPro, Graphics.g4dn, and GraphicsPro.g4dn). @@ -6078,8 +6146,9 @@ extension WorkSpaces { public let userVolumeSizeGib: Int? @inlinable - public init(computeTypeName: Compute? = nil, operatingSystemName: OperatingSystemName? = nil, protocols: [`Protocol`]? = nil, rootVolumeSizeGib: Int? = nil, runningMode: RunningMode? = nil, runningModeAutoStopTimeoutInMinutes: Int? = nil, userVolumeSizeGib: Int? = nil) { + public init(computeTypeName: Compute? = nil, globalAccelerator: GlobalAcceleratorForWorkSpace? = nil, operatingSystemName: OperatingSystemName? = nil, protocols: [`Protocol`]? = nil, rootVolumeSizeGib: Int? = nil, runningMode: RunningMode? = nil, runningModeAutoStopTimeoutInMinutes: Int? = nil, userVolumeSizeGib: Int? = nil) { self.computeTypeName = computeTypeName + self.globalAccelerator = globalAccelerator self.operatingSystemName = operatingSystemName self.protocols = protocols self.rootVolumeSizeGib = rootVolumeSizeGib @@ -6090,6 +6159,7 @@ extension WorkSpaces { private enum CodingKeys: String, CodingKey { case computeTypeName = "ComputeTypeName" + case globalAccelerator = "GlobalAccelerator" case operatingSystemName = "OperatingSystemName" case protocols = "Protocols" case rootVolumeSizeGib = "RootVolumeSizeGib" diff --git a/models/amplify.json b/models/amplify.json index 45ba331ec0..61be8fba94 100644 --- a/models/amplify.json +++ b/models/amplify.json @@ -1100,14 +1100,14 @@ "createTime": { "target": "com.amazonaws.amplify#CreateTime", "traits": { - "smithy.api#documentation": "

Creates a date and time for the Amplify app.

", + "smithy.api#documentation": "

A timestamp of when Amplify created the application.

", "smithy.api#required": {} } }, "updateTime": { "target": "com.amazonaws.amplify#UpdateTime", "traits": { - "smithy.api#documentation": "

Updates the date and time for the Amplify app.

", + "smithy.api#documentation": "

A timestamp of when Amplify updated the application.

", "smithy.api#required": {} } }, @@ -1210,6 +1210,18 @@ "traits": { "smithy.api#documentation": "

The cache configuration for the Amplify app. If you don't specify the\n cache configuration type, Amplify uses the default\n AMPLIFY_MANAGED setting.

" } + }, + "webhookCreateTime": { + "target": "com.amazonaws.amplify#webhookCreateTime", + "traits": { + "smithy.api#documentation": "

A timestamp of when Amplify created the webhook in your Git repository.

" + } + }, + "wafConfiguration": { + "target": "com.amazonaws.amplify#WafConfiguration", + "traits": { + "smithy.api#documentation": "

Describes the Firewall configuration for the Amplify app. Firewall support enables you to protect your hosted applications with a direct integration\n with WAF.

" + } } }, "traits": { @@ -1587,14 +1599,14 @@ "createTime": { "target": "com.amazonaws.amplify#CreateTime", "traits": { - "smithy.api#documentation": "

The creation date and time for a branch that is part of an Amplify app.

", + "smithy.api#documentation": "

A timestamp of when Amplify created the branch.

", "smithy.api#required": {} } }, "updateTime": { "target": "com.amazonaws.amplify#UpdateTime", "traits": { - "smithy.api#documentation": "

The last updated date and time for a branch that is part of an Amplify app.

", + "smithy.api#documentation": "

A timestamp for the last updated time for a branch.

", "smithy.api#required": {} } }, @@ -4169,6 +4181,12 @@ "com.amazonaws.amplify#JobStatus": { "type": "enum", "members": { + "CREATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATED" + } + }, "PENDING": { "target": "smithy.api#Unit", "traits": { @@ -4253,7 +4271,7 @@ "commitTime": { "target": "com.amazonaws.amplify#CommitTime", "traits": { - "smithy.api#documentation": "

The commit date and time for the job.

", + "smithy.api#documentation": "

The commit date and time for the job.

", "smithy.api#required": {} } }, @@ -6686,6 +6704,77 @@ "com.amazonaws.amplify#Verified": { "type": "boolean" }, + "com.amazonaws.amplify#WafConfiguration": { + "type": "structure", + "members": { + "webAclArn": { + "target": "com.amazonaws.amplify#WebAclArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the web ACL associated with an Amplify app.

" + } + }, + "wafStatus": { + "target": "com.amazonaws.amplify#WafStatus", + "traits": { + "smithy.api#documentation": "

The status of the process to associate or disassociate a web ACL to an Amplify app.

" + } + }, + "statusReason": { + "target": "com.amazonaws.amplify#StatusReason", + "traits": { + "smithy.api#documentation": "

The reason for the current status of the Firewall configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the Firewall configuration for a hosted Amplify application.\n Firewall support enables you to protect your web applications with a direct integration\n with WAF. For more information about using WAF protections for an Amplify application, see\n Firewall support for hosted sites in the Amplify\n User Guide.

" + } + }, + "com.amazonaws.amplify#WafStatus": { + "type": "enum", + "members": { + "ASSOCIATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSOCIATING" + } + }, + "ASSOCIATION_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSOCIATION_FAILED" + } + }, + "ASSOCIATION_SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ASSOCIATION_SUCCESS" + } + }, + "DISASSOCIATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISASSOCIATING" + } + }, + "DISASSOCIATION_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISASSOCIATION_FAILED" + } + } + } + }, + "com.amazonaws.amplify#WebAclArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^arn:aws:wafv2:" + } + }, "com.amazonaws.amplify#Webhook": { "type": "structure", "members": { @@ -6727,14 +6816,14 @@ "createTime": { "target": "com.amazonaws.amplify#CreateTime", "traits": { - "smithy.api#documentation": "

The create date and time for a webhook.

", + "smithy.api#documentation": "

A timestamp of when Amplify created the webhook in your Git repository.

", "smithy.api#required": {} } }, "updateTime": { "target": "com.amazonaws.amplify#UpdateTime", "traits": { - "smithy.api#documentation": "

Updates the date and time for a webhook.

", + "smithy.api#documentation": "

A timestamp of when Amplify updated the webhook in your Git repository.

", "smithy.api#required": {} } } @@ -6776,6 +6865,9 @@ "member": { "target": "com.amazonaws.amplify#Webhook" } + }, + "com.amazonaws.amplify#webhookCreateTime": { + "type": "timestamp" } } } diff --git a/models/appstream.json b/models/appstream.json index f60bb59d4d..11b0cb92d3 100644 --- a/models/appstream.json +++ b/models/appstream.json @@ -8186,6 +8186,12 @@ "traits": { "smithy.api#enumValue": "RHEL8" } + }, + "ROCKY_LINUX8": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ROCKY_LINUX8" + } } } }, diff --git a/models/bcm-pricing-calculator.json b/models/bcm-pricing-calculator.json index e1576c9781..5e283231c2 100644 --- a/models/bcm-pricing-calculator.json +++ b/models/bcm-pricing-calculator.json @@ -3737,6 +3737,9 @@ "target": "com.amazonaws.bcmpricingcalculator#DeleteBillEstimateResponse" }, "errors": [ + { + "target": "com.amazonaws.bcmpricingcalculator#ConflictException" + }, { "target": "com.amazonaws.bcmpricingcalculator#DataUnavailableException" } diff --git a/models/bedrock-agent-runtime.json b/models/bedrock-agent-runtime.json index 9cea6ed60e..2627baf336 100644 --- a/models/bedrock-agent-runtime.json +++ b/models/bedrock-agent-runtime.json @@ -225,7 +225,7 @@ "parentActionGroupSignature": { "target": "com.amazonaws.bedrockagentruntime#ActionGroupSignature", "traits": { - "smithy.api#documentation": "

\n To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. \n You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.\n

\n

To allow your agent to generate, run, and troubleshoot code when trying to complete a task, set this field to AMAZON.CodeInterpreter. You must \n leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

\n

During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request,\n it will invoke this action group instead and return an Observation reprompting the user for more information.

" + "smithy.api#documentation": "

\n To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. \n You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.\n

\n

To allow your agent to generate, run, and troubleshoot code when trying to complete a task, set this field to AMAZON.CodeInterpreter. You must \n leave the description, apiSchema, and actionGroupExecutor fields blank for this action group.

\n

During orchestration, if your agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request,\n it will invoke this action group instead and return an Observation reprompting the user for more information.

" } }, "actionGroupExecutor": { @@ -1279,10 +1279,10 @@ "smithy.api#documentation": "

Controls the API operations or functions to invoke based on the user confirmation.

" } }, - "responseBody": { - "target": "com.amazonaws.bedrockagentruntime#ResponseBody", + "responseState": { + "target": "com.amazonaws.bedrockagentruntime#ResponseState", "traits": { - "smithy.api#documentation": "

The response body from the API operation. The key of the object is the content type (currently, only TEXT is supported). The response may be returned directly or from the Lambda function.

" + "smithy.api#documentation": "

Controls the final response state returned to end user when API/Function execution failed. When this state is FAILURE, the request would fail with dependency failure exception. When this state is REPROMPT, the API/function response will be sent to model for re-prompt

" } }, "httpStatusCode": { @@ -1291,10 +1291,10 @@ "smithy.api#documentation": "

http status code from API execution response (for example: 200, 400, 500).

" } }, - "responseState": { - "target": "com.amazonaws.bedrockagentruntime#ResponseState", + "responseBody": { + "target": "com.amazonaws.bedrockagentruntime#ResponseBody", "traits": { - "smithy.api#documentation": "

Controls the final response state returned to end user when API/Function execution failed. When this state is FAILURE, the request would fail with dependency failure exception. When this state is REPROMPT, the API/function response will be sent to model for re-prompt

" + "smithy.api#documentation": "

The response body from the API operation. The key of the object is the content type (currently, only TEXT is supported). The response may be returned directly or from the Lambda function.

" } }, "agentId": { @@ -1390,6 +1390,20 @@ "smithy.api#pattern": "^(arn:aws(-[^:]+)?:(bedrock|sagemaker):[a-z0-9-]{1,20}:([0-9]{12})?:([a-z-]+/)?)?([a-z0-9.-]{1,63}){0,2}(([:][a-z0-9-]{1,63}){0,2})?(/[a-z0-9]{1,12})?$" } }, + "com.amazonaws.bedrockagentruntime#BedrockModelConfigurations": { + "type": "structure", + "members": { + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The performance configuration for the model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for a model called with InvokeAgent.

" + } + }, "com.amazonaws.bedrockagentruntime#BedrockRerankingConfiguration": { "type": "structure", "members": { @@ -1873,6 +1887,13 @@ "smithy.api#documentation": "

The unique identifier of the memory.

", "smithy.api#httpQuery": "memoryId" } + }, + "sessionId": { + "target": "com.amazonaws.bedrockagentruntime#SessionId", + "traits": { + "smithy.api#documentation": "

The unique session identifier of the memory.

", + "smithy.api#httpQuery": "sessionId" + } } }, "traits": { @@ -2004,6 +2025,12 @@ "traits": { "smithy.api#documentation": "

Additional model parameters and their corresponding values not included in the textInferenceConfig structure for an external source. Takes in custom model parameters specific to the language model being used.

" } + }, + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The latency configuration for the model.

" + } } }, "traits": { @@ -3098,6 +3125,12 @@ "traits": { "smithy.api#documentation": "

Additional model parameters and corresponding values not included in the textInferenceConfig structure for a knowledge base. This allows users to provide custom model parameters specific to the language model being used.

" } + }, + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The latency configuration for the model.

" + } } }, "traits": { @@ -4308,6 +4341,20 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.bedrockagentruntime#InlineBedrockModelConfigurations": { + "type": "structure", + "members": { + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The latency configuration for the model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for a model called with InvokeInlineAgent.

" + } + }, "com.amazonaws.bedrockagentruntime#InlineSessionState": { "type": "structure", "members": { @@ -4593,6 +4640,9 @@ { "target": "com.amazonaws.bedrockagentruntime#InternalServerException" }, + { + "target": "com.amazonaws.bedrockagentruntime#ModelNotReadyException" + }, { "target": "com.amazonaws.bedrockagentruntime#ResourceNotFoundException" }, @@ -4607,7 +4657,7 @@ } ], "traits": { - "smithy.api#documentation": "\n

The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent.

\n
\n

Sends a prompt for the agent to process and respond to. Note the following fields for the request:

\n
    \n
  • \n

    To continue the same conversation with an agent, use the same sessionId value in the request.

    \n
  • \n
  • \n

    To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.

    \n
  • \n
  • \n

    End a conversation by setting endSession to true.

    \n
  • \n
  • \n

    In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.

    \n
  • \n
\n

The response is returned in the bytes field of the chunk object.

\n
    \n
  • \n

    The attribution object contains citations for parts of the response.

    \n
  • \n
  • \n

    If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.

    \n
  • \n
  • \n

    If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field.

    \n
  • \n
  • \n

    Errors are also surfaced in the response.

    \n
  • \n
", + "smithy.api#documentation": "\n

The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent.

\n
\n

Sends a prompt for the agent to process and respond to. Note the following fields for the request:

\n
    \n
  • \n

    To continue the same conversation with an agent, use the same sessionId value in the request.

    \n
  • \n
  • \n

    To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.

    \n
  • \n
  • \n

    To stream agent responses, make sure that only orchestration prompt is enabled. Agent streaming is not supported for the following steps:\n

    \n
      \n
    • \n

      \n Pre-processing\n

      \n
    • \n
    • \n

      \n Post-processing\n

      \n
    • \n
    • \n

      Agent with 1 Knowledge base and User Input not enabled

      \n
    • \n
    \n
  • \n
  • \n

    End a conversation by setting endSession to true.

    \n
  • \n
  • \n

    In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.

    \n
  • \n
\n

The response is returned in the bytes field of the chunk object.

\n
    \n
  • \n

    The attribution object contains citations for parts of the response.

    \n
  • \n
  • \n

    If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.

    \n
  • \n
  • \n

    If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field.

    \n
  • \n
  • \n

    Errors are also surfaced in the response.

    \n
  • \n
", "smithy.api#http": { "code": 200, "method": "POST", @@ -4672,10 +4722,16 @@ "smithy.api#documentation": "

The unique identifier of the agent memory.

" } }, + "bedrockModelConfigurations": { + "target": "com.amazonaws.bedrockagentruntime#BedrockModelConfigurations", + "traits": { + "smithy.api#documentation": "

Model performance settings for the request.

" + } + }, "streamingConfigurations": { "target": "com.amazonaws.bedrockagentruntime#StreamingConfigurations", "traits": { - "smithy.api#documentation": "

\n Specifies the configurations for streaming.\n

" + "smithy.api#documentation": "

\n Specifies the configurations for streaming.\n

\n \n

To use agent streaming, you need permissions to perform the bedrock:InvokeModelWithResponseStream action.

\n
" } }, "sourceArn": { @@ -4806,6 +4862,12 @@ "traits": { "smithy.api#documentation": "

Specifies whether to return the trace for the flow or not. Traces track inputs and outputs for nodes in the flow. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

" } + }, + "modelPerformanceConfiguration": { + "target": "com.amazonaws.bedrockagentruntime#ModelPerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

Model performance settings for the request.

" + } } }, "traits": { @@ -4958,6 +5020,12 @@ "traits": { "smithy.api#documentation": "

\n Configurations for advanced prompts used to override the default prompts to enhance the accuracy of the inline agent.\n

" } + }, + "bedrockModelConfigurations": { + "target": "com.amazonaws.bedrockagentruntime#InlineBedrockModelConfigurations", + "traits": { + "smithy.api#documentation": "

Model settings for the request.

" + } } }, "traits": { @@ -5607,6 +5675,33 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.bedrockagentruntime#ModelNotReadyException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.bedrockagentruntime#NonBlankString" + } + }, + "traits": { + "smithy.api#documentation": "

\n The model specified in the request is not ready to serve inference requests. The AWS SDK\n will automatically retry the operation up to 5 times. For information about configuring\n automatic retries, see Retry behavior in the AWS SDKs and Tools\n reference guide.\n

", + "smithy.api#error": "client", + "smithy.api#httpError": 424 + } + }, + "com.amazonaws.bedrockagentruntime#ModelPerformanceConfiguration": { + "type": "structure", + "members": { + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The latency configuration for the model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The performance configuration for a model called with InvokeFlow.

" + } + }, "com.amazonaws.bedrockagentruntime#Name": { "type": "string", "traits": { @@ -5952,6 +6047,12 @@ "traits": { "smithy.api#documentation": "

To split up the prompt and retrieve multiple sources, set the transformation type to\n QUERY_DECOMPOSITION.

" } + }, + "performanceConfig": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The latency configuration for the model.

" + } } }, "traits": { @@ -6246,6 +6347,38 @@ } } }, + "com.amazonaws.bedrockagentruntime#PerformanceConfigLatency": { + "type": "enum", + "members": { + "STANDARD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "standard" + } + }, + "OPTIMIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "optimized" + } + } + } + }, + "com.amazonaws.bedrockagentruntime#PerformanceConfiguration": { + "type": "structure", + "members": { + "latency": { + "target": "com.amazonaws.bedrockagentruntime#PerformanceConfigLatency", + "traits": { + "smithy.api#default": "standard", + "smithy.api#documentation": "

To use a latency-optimized version of the model, set to optimized.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Performance settings for a model.

" + } + }, "com.amazonaws.bedrockagentruntime#PostProcessingModelInvocationOutput": { "type": "structure", "members": { @@ -7251,6 +7384,12 @@ "smithy.api#documentation": "

There was an issue with a dependency due to a server issue. Retry your request.

" } }, + "modelNotReadyException": { + "target": "com.amazonaws.bedrockagentruntime#ModelNotReadyException", + "traits": { + "smithy.api#documentation": "

\n The model specified in the request is not ready to serve Inference requests. The AWS SDK\n will automatically retry the operation up to 5 times. For information about configuring\n automatic retries, see Retry behavior in the AWS SDKs and Tools\n reference guide.\n

" + } + }, "files": { "target": "com.amazonaws.bedrockagentruntime#FilePart", "traits": { @@ -8721,7 +8860,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Configurations for streaming.\n

" + "smithy.api#documentation": "

\n Configurations for streaming.

" } }, "com.amazonaws.bedrockagentruntime#SummaryText": { diff --git a/models/bedrock-agent.json b/models/bedrock-agent.json index e6601004b9..3a4a736889 100644 --- a/models/bedrock-agent.json +++ b/models/bedrock-agent.json @@ -11871,6 +11871,14 @@ "smithy.api#documentation": "

Details about a malformed input expression in a node.

" } }, + "com.amazonaws.bedrockagent#MaxRecentSessions": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, "com.amazonaws.bedrockagent#MaxResults": { "type": "integer", "traits": { @@ -11905,6 +11913,12 @@ "smithy.api#default": 30, "smithy.api#documentation": "

The number of days the agent is configured to retain the conversational context.

" } + }, + "sessionSummaryConfiguration": { + "target": "com.amazonaws.bedrockagent#SessionSummaryConfiguration", + "traits": { + "smithy.api#documentation": "

Contains the configuration for SESSION_SUMMARY memory type enabled for the agent.

" + } } }, "traits": { @@ -13571,6 +13585,12 @@ "traits": { "smithy.api#enumValue": "KNOWLEDGE_BASE_RESPONSE_GENERATION" } + }, + "MEMORY_SUMMARIZATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MEMORY_SUMMARIZATION" + } } } }, @@ -14744,6 +14764,21 @@ "smithy.api#httpError": 402 } }, + "com.amazonaws.bedrockagent#SessionSummaryConfiguration": { + "type": "structure", + "members": { + "maxRecentSessions": { + "target": "com.amazonaws.bedrockagent#MaxRecentSessions", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

Maximum number of recent session summaries to include in the agent's prompt context.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration for SESSION_SUMMARY memory type enabled for the agent.

" + } + }, "com.amazonaws.bedrockagent#SessionTTL": { "type": "integer", "traits": { @@ -14761,6 +14796,12 @@ "traits": { "smithy.api#enumValue": "OAUTH2_CLIENT_CREDENTIALS" } + }, + "OAUTH2_SHAREPOINT_APP_ONLY_CLIENT_CREDENTIALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OAUTH2_SHAREPOINT_APP_ONLY_CLIENT_CREDENTIALS" + } } } }, @@ -15200,7 +15241,7 @@ "smithy.api#default": 30, "smithy.api#range": { "min": 0, - "max": 30 + "max": 365 } } }, @@ -17332,6 +17373,16 @@ "smithy.api#documentation": "

The configuration of web URLs that you want to crawl. \n You should be authorized to crawl the URLs.

" } }, + "com.amazonaws.bedrockagent#UserAgent": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 15, + "max": 40 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrockagent#ValidateFlowDefinition": { "type": "operation", "input": { @@ -17547,6 +17598,12 @@ "traits": { "smithy.api#documentation": "

The scope of what is crawled for your URLs.

\n

You can choose to crawl only web pages that belong to the same host or primary \n domain. For example, only web pages that contain the seed URL \n \"https://docs.aws.amazon.com/bedrock/latest/userguide/\" and no other domains. \n You can choose to include sub domains in addition to the host or primary domain. \n For example, web pages that contain \"aws.amazon.com\" can also include sub domain \n \"docs.aws.amazon.com\".

" } + }, + "userAgent": { + "target": "com.amazonaws.bedrockagent#UserAgent", + "traits": { + "smithy.api#documentation": "

A string used for identifying the crawler or a bot when it accesses a web server. By default, \n this is set to bedrockbot_UUID for your crawler. You can optionally append a custom \n string to bedrockbot_UUID to allowlist a specific user agent permitted to access your source URLs. \n

" + } } }, "traits": { @@ -17565,6 +17622,15 @@ "max": 300 } } + }, + "maxPages": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

\n The max number of web pages crawled from your source URLs, up to 25,000 pages. If \n the web pages exceed this limit, the data source sync will fail and no web pages will be ingested.\n

", + "smithy.api#range": { + "min": 1 + } + } } }, "traits": { diff --git a/models/bedrock-data-automation-runtime.json b/models/bedrock-data-automation-runtime.json index 9b1ef3fa06..e6257f5d67 100644 --- a/models/bedrock-data-automation-runtime.json +++ b/models/bedrock-data-automation-runtime.json @@ -33,7 +33,7 @@ "name": "bedrock" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Bedrock Keystone Runtime", + "smithy.api#documentation": "Amazon Bedrock Data Automation Runtime", "smithy.api#title": "Runtime for Amazon Bedrock Data Automation", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/bedrock-data-automation.json b/models/bedrock-data-automation.json index 2c2c8ab2df..625c2b8853 100644 --- a/models/bedrock-data-automation.json +++ b/models/bedrock-data-automation.json @@ -41,7 +41,7 @@ "name": "bedrock" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Amazon Bedrock Keystone Build", + "smithy.api#documentation": "Amazon Bedrock Data Automation BuildTime", "smithy.api#title": "Data Automation for Amazon Bedrock", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1148,7 +1148,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Amazon Bedrock Keystone Blueprint", + "smithy.api#documentation": "Creates an Amazon Bedrock Data Automation Blueprint", "smithy.api#http": { "code": 201, "method": "PUT", @@ -1241,7 +1241,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new version of an existing Amazon Bedrock Keystone Blueprint", + "smithy.api#documentation": "Creates a new version of an existing Amazon Bedrock Data Automation Blueprint", "smithy.api#http": { "code": 201, "method": "POST", @@ -1318,7 +1318,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Amazon Bedrock Keystone DataAutomationProject", + "smithy.api#documentation": "Creates an Amazon Bedrock Data Automation Project", "smithy.api#http": { "code": 201, "method": "PUT", @@ -1669,7 +1669,7 @@ } 
], "traits": { - "smithy.api#documentation": "Deletes an existing Amazon Bedrock Keystone Blueprint", + "smithy.api#documentation": "Deletes an existing Amazon Bedrock Data Automation Blueprint", "smithy.api#http": { "code": 204, "method": "DELETE", @@ -1736,7 +1736,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an existing Amazon Bedrock Keystone DataAutomationProject", + "smithy.api#documentation": "Deletes an existing Amazon Bedrock Data Automation Project", "smithy.api#http": { "code": 204, "method": "DELETE", @@ -2037,7 +2037,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets an existing Amazon Bedrock Keystone Blueprint", + "smithy.api#documentation": "Gets an existing Amazon Bedrock Data Automation Blueprint", "smithy.api#http": { "code": 200, "method": "POST", @@ -2117,7 +2117,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets an existing Amazon Bedrock Keystone DataAutomationProject", + "smithy.api#documentation": "Gets an existing Amazon Bedrock Data Automation Project", "smithy.api#http": { "code": 200, "method": "POST", @@ -2363,7 +2363,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists all existing Amazon Bedrock Keystone Blueprints", + "smithy.api#documentation": "Lists all existing Amazon Bedrock Data Automation Blueprints", "smithy.api#http": { "code": 200, "method": "POST", @@ -2449,7 +2449,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists all existing Amazon Bedrock Keystone DataAutomationProjects", + "smithy.api#documentation": "Lists all existing Amazon Bedrock Data Automation Projects", "smithy.api#http": { "code": 200, "method": "POST", @@ -2704,7 +2704,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an existing Amazon Bedrock Blueprint", + "smithy.api#documentation": "Updates an existing Amazon Bedrock Data Automation Blueprint", "smithy.api#http": { "code": 200, "method": "PUT", @@ -2783,7 +2783,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an existing Amazon Bedrock 
DataAutomationProject", + "smithy.api#documentation": "Updates an existing Amazon Bedrock Data Automation Project", "smithy.api#http": { "code": 200, "method": "PUT", diff --git a/models/billing.json b/models/billing.json index 5b6c49d7f8..4e4f50ad2c 100644 --- a/models/billing.json +++ b/models/billing.json @@ -5,8 +5,35 @@ "type": "service", "version": "2023-09-07", "operations": [ + { + "target": "com.amazonaws.billing#CreateBillingView" + }, + { + "target": "com.amazonaws.billing#DeleteBillingView" + }, + { + "target": "com.amazonaws.billing#GetBillingView" + }, + { + "target": "com.amazonaws.billing#GetResourcePolicy" + }, { "target": "com.amazonaws.billing#ListBillingViews" + }, + { + "target": "com.amazonaws.billing#ListSourceViewsForBillingView" + }, + { + "target": "com.amazonaws.billing#ListTagsForResource" + }, + { + "target": "com.amazonaws.billing#TagResource" + }, + { + "target": "com.amazonaws.billing#UntagResource" + }, + { + "target": "com.amazonaws.billing#UpdateBillingView" } ], "traits": { @@ -543,7 +570,86 @@ "com.amazonaws.billing#BillingViewArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9_\\+=\\.\\-@]{1,43}$" + "smithy.api#pattern": "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[a-zA-Z0-9/:_\\+=\\.\\-@]{0,59}[a-zA-Z0-9]$" + } + }, + "com.amazonaws.billing#BillingViewArnList": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#BillingViewArn" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.billing#BillingViewDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#pattern": "^([ a-zA-Z0-9_\\+=\\.\\-@]+)?$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.billing#BillingViewElement": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

" + } + }, + "name": { + "target": "com.amazonaws.billing#BillingViewName", + "traits": { + "smithy.api#documentation": "

\n The name of the billing view.\n

" + } + }, + "description": { + "target": "com.amazonaws.billing#BillingViewDescription", + "traits": { + "smithy.api#documentation": "

\n The description of the billing view.\n

" + } + }, + "billingViewType": { + "target": "com.amazonaws.billing#BillingViewType", + "traits": { + "smithy.api#documentation": "

The type of billing view.\n

" + } + }, + "ownerAccountId": { + "target": "com.amazonaws.billing#AccountId", + "traits": { + "smithy.api#documentation": "

\n The account owner of the billing view.\n

" + } + }, + "dataFilterExpression": { + "target": "com.amazonaws.billing#Expression", + "traits": { + "smithy.api#documentation": "

\n See Expression. Billing view only supports LINKED_ACCOUNT and Tags.\n

" + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time when the billing view was created.\n

" + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time when the billing view was last updated.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The metadata associated with the billing view.\n

" } }, "com.amazonaws.billing#BillingViewList": { @@ -567,6 +673,12 @@ "smithy.api#documentation": "

\n The name of the billing view.\n

" } }, + "description": { + "target": "com.amazonaws.billing#BillingViewDescription", + "traits": { + "smithy.api#documentation": "

\n The description of the billing view.\n

" + } + }, "ownerAccountId": { "target": "com.amazonaws.billing#AccountId", "traits": { @@ -587,10 +699,26 @@ "com.amazonaws.billing#BillingViewName": { "type": "string", "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, "smithy.api#pattern": "^[ a-zA-Z0-9_\\+=\\.\\-@]+$", "smithy.api#sensitive": {} } }, + "com.amazonaws.billing#BillingViewSourceViewsList": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#BillingViewArn" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, "com.amazonaws.billing#BillingViewType": { "type": "enum", "members": { @@ -605,9 +733,21 @@ "traits": { "smithy.api#enumValue": "BILLING_GROUP" } + }, + "CUSTOM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOM" + } } } }, + "com.amazonaws.billing#BillingViewTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#BillingViewType" + } + }, "com.amazonaws.billing#BillingViewsMaxResults": { "type": "integer", "traits": { @@ -617,23 +757,13 @@ } } }, - "com.amazonaws.billing#ErrorMessage": { - "type": "string", - "traits": { - "smithy.api#length": { - "max": 1024 - } - } - }, - "com.amazonaws.billing#FieldName": { + "com.amazonaws.billing#ClientToken": { "type": "string", "traits": { - "smithy.api#length": { - "max": 100 - } + "smithy.api#pattern": "^[a-zA-Z0-9-]+$" } }, - "com.amazonaws.billing#InternalServerException": { + "com.amazonaws.billing#ConflictException": { "type": "structure", "members": { "message": { @@ -641,33 +771,53 @@ "traits": { "smithy.api#required": {} } + }, + "resourceId": { + "target": "com.amazonaws.billing#ResourceId", + "traits": { + "smithy.api#documentation": "

\n The identifier for the service resource associated with the request.\n

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "com.amazonaws.billing#ResourceType", + "traits": { + "smithy.api#documentation": "

\n The type of resource associated with the request.\n

", + "smithy.api#required": {} + } } }, "traits": { "aws.protocols#awsQueryError": { - "code": "BillingInternalServer", - "httpResponseCode": 500 + "code": "BillingConflict", + "httpResponseCode": 409 }, - "smithy.api#documentation": "

The request processing failed because of an unknown error, exception, or failure.\n

", - "smithy.api#error": "server", - "smithy.api#httpError": 500 + "smithy.api#documentation": "

\n The requested operation would cause a conflict with the current state of a service resource associated with the request. Resolve the conflict before retrying this request.\n

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 } }, - "com.amazonaws.billing#ListBillingViews": { + "com.amazonaws.billing#CreateBillingView": { "type": "operation", "input": { - "target": "com.amazonaws.billing#ListBillingViewsRequest" + "target": "com.amazonaws.billing#CreateBillingViewRequest" }, "output": { - "target": "com.amazonaws.billing#ListBillingViewsResponse" + "target": "com.amazonaws.billing#CreateBillingViewResponse" }, "errors": [ { "target": "com.amazonaws.billing#AccessDeniedException" }, + { + "target": "com.amazonaws.billing#ConflictException" + }, { "target": "com.amazonaws.billing#InternalServerException" }, + { + "target": "com.amazonaws.billing#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.billing#ThrottlingException" }, @@ -676,97 +826,75 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the billing views available for a given time period.\n

\n

Every Amazon Web Services account has a unique PRIMARY billing view that represents the billing data available by default. Accounts that use Billing Conductor also have BILLING_GROUP billing views representing pro forma costs associated with each created billing group.

", + "smithy.api#documentation": "

\nCreates a billing view with the specified billing view attributes.\n

", "smithy.api#examples": [ { - "title": "Invoke ListBillingViews", + "title": "Invoke CreateBillingView", "input": { - "activeTimeRange": { - "activeAfterInclusive": 1719792000, - "activeBeforeInclusive": 1.722470399999E9 + "name": "Example Custom Billing View", + "sourceViews": [ + "arn:aws:billing::123456789101:billingview/primary" + ], + "description": "Custom Billing View Example", + "dataFilterExpression": { + "dimensions": { + "key": "LINKED_ACCOUNT", + "values": [ + "000000000000" + ] + } } }, "output": { - "billingViews": [ - { - "arn": "arn:aws:billing::123456789101:billingview/primary", - "billingViewType": "PRIMARY", - "name": "Primary Billing View Account 123456789101", - "ownerAccountId": "123456789101" - } - ] + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "createdAt": 1719792001 } - }, - { - "title": "Error example for ListBillingViews", - "input": { - "activeTimeRange": { - "activeAfterInclusive": 1719792001, - "activeBeforeInclusive": 1719792000 - } - }, - "error": { - "shapeId": "com.amazonaws.billing#ValidationException", - "content": { - "message": "Failed to get billing view data for an invalid time range.", - "reason": "other" - } - }, - "allowConstraintErrors": true } ], - "smithy.api#http": { - "method": "POST", - "uri": "/", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults", - "items": "billingViews" - }, - "smithy.api#readonly": {}, - "smithy.test#smokeTests": [ - { - "id": "ListBillingViewsSuccess", - "params": { - "activeTimeRange": { - "activeAfterInclusive": 1719792000, - "activeBeforeInclusive": 1.722470399999E9 - } - }, - "expect": { - "success": {} - }, - "vendorParamsShape": "aws.test#AwsVendorParams", - "vendorParams": { - "region": "us-east-1" - } - } - ] + "smithy.api#idempotent": {} } }, - "com.amazonaws.billing#ListBillingViewsRequest": { + "com.amazonaws.billing#CreateBillingViewRequest": { 
"type": "structure", "members": { - "activeTimeRange": { - "target": "com.amazonaws.billing#ActiveTimeRange", + "name": { + "target": "com.amazonaws.billing#BillingViewName", "traits": { - "smithy.api#documentation": "

\n The time range for the billing views listed. PRIMARY billing view is always listed. BILLING_GROUP billing views are listed for time ranges when the associated billing group resource in Billing Conductor is active. The time range must be within one calendar month.\n

", + "smithy.api#documentation": "

\n The name of the billing view.\n

", "smithy.api#required": {} } }, - "maxResults": { - "target": "com.amazonaws.billing#BillingViewsMaxResults", + "description": { + "target": "com.amazonaws.billing#BillingViewDescription", "traits": { - "smithy.api#documentation": "

The maximum number of billing views to retrieve. Default is 100.\n

" + "smithy.api#documentation": "

\n The description of the billing view.\n

" } }, - "nextToken": { - "target": "com.amazonaws.billing#PageToken", + "sourceViews": { + "target": "com.amazonaws.billing#BillingViewSourceViewsList", "traits": { - "smithy.api#documentation": "

The pagination token that is used on subsequent calls to list billing views.

" + "smithy.api#documentation": "

A list of billing views used as the data source for the custom billing view.

", + "smithy.api#required": {} + } + }, + "dataFilterExpression": { + "target": "com.amazonaws.billing#Expression", + "traits": { + "smithy.api#documentation": "

\n See Expression. Billing view only supports LINKED_ACCOUNT and Tags.\n

" + } + }, + "clientToken": { + "target": "com.amazonaws.billing#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier you specify to ensure idempotency of the request. Idempotency ensures that an API request completes no more than one time. If the original request completes successfully, any subsequent retries complete successfully without performing any further actions with an idempotent request.\n

", + "smithy.api#httpHeader": "X-Amzn-Client-Token", + "smithy.api#idempotencyToken": {} + } + }, + "resourceTags": { + "target": "com.amazonaws.billing#ResourceTagList", + "traits": { + "smithy.api#documentation": "

A list of key value map specifying tags associated to the billing view being created.\n

" } } }, @@ -774,20 +902,20 @@ "smithy.api#input": {} } }, - "com.amazonaws.billing#ListBillingViewsResponse": { + "com.amazonaws.billing#CreateBillingViewResponse": { "type": "structure", "members": { - "billingViews": { - "target": "com.amazonaws.billing#BillingViewList", + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", "traits": { - "smithy.api#documentation": "

A list of BillingViewListElement retrieved.

", + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.billing#PageToken", + "createdAt": { + "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The pagination token to use on subsequent calls to list billing views.\n

" + "smithy.api#documentation": "

\n The time when the billing view was created.\n

" } } }, @@ -795,35 +923,1166 @@ "smithy.api#output": {} } }, - "com.amazonaws.billing#PageToken": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 2047 - } - } - }, - "com.amazonaws.billing#ThrottlingException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.billing#ErrorMessage", - "traits": { - "smithy.api#required": {} - } - } + "com.amazonaws.billing#DeleteBillingView": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#DeleteBillingViewRequest" }, - "traits": { - "aws.protocols#awsQueryError": { - "code": "BillingThrottling", - "httpResponseCode": 429 + "output": { + "target": "com.amazonaws.billing#DeleteBillingViewResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" }, - "smithy.api#documentation": "

The request was denied due to request throttling.\n

", - "smithy.api#error": "client", + { + "target": "com.amazonaws.billing#ConflictException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the specified billing view.

", + "smithy.api#examples": [ + { + "title": "Invoke DeleteBillingView", + "input": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + }, + "output": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + } + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.billing#DeleteBillingViewRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#DeleteBillingViewResponse": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#Dimension": { + "type": "enum", + "members": { + "LINKED_ACCOUNT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LINKED_ACCOUNT" + } + } + } + }, + "com.amazonaws.billing#DimensionValues": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.billing#Dimension", + "traits": { + "smithy.api#documentation": "

\n The names of the metadata types that you can use to filter and group your results. \n

", + "smithy.api#required": {} + } + }, + "values": { + "target": "com.amazonaws.billing#Values", + "traits": { + "smithy.api#documentation": "

\n The metadata values that you can use to filter and group your results. \n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The metadata that you can use to filter and group your results.\n

" + } + }, + "com.amazonaws.billing#ErrorMessage": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 1024 + } + } + }, + "com.amazonaws.billing#Expression": { + "type": "structure", + "members": { + "dimensions": { + "target": "com.amazonaws.billing#DimensionValues", + "traits": { + "smithy.api#documentation": "

\n The specific Dimension to use for Expression.\n

" + } + }, + "tags": { + "target": "com.amazonaws.billing#TagValues", + "traits": { + "smithy.api#documentation": "

\n The specific Tag to use for Expression.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n See Expression. Billing view only supports LINKED_ACCOUNT and Tags.\n

" + } + }, + "com.amazonaws.billing#FieldName": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 100 + } + } + }, + "com.amazonaws.billing#GetBillingView": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#GetBillingViewRequest" + }, + "output": { + "target": "com.amazonaws.billing#GetBillingViewResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the metadata associated with the specified billing view ARN.\n

", + "smithy.api#examples": [ + { + "title": "Invoke GetBillingView", + "input": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + }, + "output": { + "billingView": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "name": "Example Custom Billing View", + "description": "Custom Billing View Example -- updated description", + "dataFilterExpression": { + "dimensions": { + "key": "LINKED_ACCOUNT", + "values": [ + "000000000000" + ] + } + }, + "ownerAccountId": "123456789101", + "billingViewType": "CUSTOM" + } + } + } + ], + "smithy.api#readonly": {} + } + }, + "com.amazonaws.billing#GetBillingViewRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#GetBillingViewResponse": { + "type": "structure", + "members": { + "billingView": { + "target": "com.amazonaws.billing#BillingViewElement", + "traits": { + "smithy.api#documentation": "

The billing view element associated with the specified ARN.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#GetResourcePolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#GetResourcePolicyRequest" + }, + "output": { + "target": "com.amazonaws.billing#GetResourcePolicyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the resource-based policy document attached to the resource in JSON format.\n

", + "smithy.api#examples": [ + { + "title": "Invoke GetResourcePolicy", + "input": { + "resourceArn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + }, + "output": { + "resourceArn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "policy": "{\"Version\":\"2012-10-17\",\"Id\":\"46f47cb2-a11d-43f3-983d-470b5708a899\",\"Statement\":[{\"Sid\":\"ExampleStatement1\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::000000000000:root\"},\"Action\":[\"ce:GetDimensionValues\",\"ce:GetCostAndUsageWithResources\",\"ce:GetCostAndUsage\",\"ce:GetCostForecast\",\"ce:GetTags\",\"ce:GetUsageForecast\",\"ce:GetCostCategories\",\"billing:GetBillingView\"],\"Resource\":\"arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899\"}]}" + } + } + ], + "smithy.api#readonly": {} + } + }, + "com.amazonaws.billing#GetResourcePolicyRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.billing#ResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the billing view resource to which the policy is attached.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#GetResourcePolicyResponse": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.billing#ResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the billing view resource to which the policy is attached.\n

", + "smithy.api#required": {} + } + }, + "policy": { + "target": "com.amazonaws.billing#PolicyDocument", + "traits": { + "smithy.api#documentation": "

The resource-based policy document attached to the resource in JSON format.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.billing#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "BillingInternalServer", + "httpResponseCode": 500 + }, + "smithy.api#documentation": "

The request processing failed because of an unknown error, exception, or failure.\n

", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.billing#ListBillingViews": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#ListBillingViewsRequest" + }, + "output": { + "target": "com.amazonaws.billing#ListBillingViewsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the billing views available for a given time period.\n

\n

Every Amazon Web Services account has a unique PRIMARY billing view that represents the billing data available by default. Accounts that use Billing Conductor also have BILLING_GROUP billing views representing pro forma costs associated with each created billing group.

", + "smithy.api#examples": [ + { + "title": "Invoke ListBillingViews", + "input": { + "activeTimeRange": { + "activeAfterInclusive": 1719792000, + "activeBeforeInclusive": 1.722470399999E9 + } + }, + "output": { + "billingViews": [ + { + "arn": "arn:aws:billing::123456789101:billingview/primary", + "billingViewType": "PRIMARY", + "name": "Primary Billing View Account 123456789101", + "ownerAccountId": "123456789101" + } + ] + } + }, + { + "title": "Error example for ListBillingViews", + "input": { + "activeTimeRange": { + "activeAfterInclusive": 1719792001, + "activeBeforeInclusive": 1719792000 + } + }, + "error": { + "shapeId": "com.amazonaws.billing#ValidationException", + "content": { + "message": "Failed to get billing view data for an invalid time range.", + "reason": "other" + } + }, + "allowConstraintErrors": true + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "billingViews" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "ListBillingViewsSuccess", + "params": { + "activeTimeRange": { + "activeAfterInclusive": 1719792000, + "activeBeforeInclusive": 1.722470399999E9 + } + }, + "expect": { + "success": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.billing#ListBillingViewsRequest": { + "type": "structure", + "members": { + "activeTimeRange": { + "target": "com.amazonaws.billing#ActiveTimeRange", + "traits": { + "smithy.api#documentation": "

\n The time range for the billing views listed. PRIMARY billing view is always listed. BILLING_GROUP billing views are listed for time ranges when the associated billing group resource in Billing Conductor is active. The time range must be within one calendar month.\n

" + } + }, + "arns": { + "target": "com.amazonaws.billing#BillingViewArnList", + "traits": { + "smithy.api#documentation": "

The list of Amazon Resource Names (ARNs) used to uniquely identify the billing views to retrieve.\n

" + } + }, + "billingViewTypes": { + "target": "com.amazonaws.billing#BillingViewTypeList", + "traits": { + "smithy.api#documentation": "

The types of billing views to retrieve.

" + } + }, + "ownerAccountId": { + "target": "com.amazonaws.billing#AccountId", + "traits": { + "smithy.api#documentation": "

\n The account owner of the billing views to retrieve.\n

" + } + }, + "maxResults": { + "target": "com.amazonaws.billing#BillingViewsMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of billing views to retrieve. Default is 100.\n

" + } + }, + "nextToken": { + "target": "com.amazonaws.billing#PageToken", + "traits": { + "smithy.api#documentation": "

The pagination token that is used on subsequent calls to list billing views.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#ListBillingViewsResponse": { + "type": "structure", + "members": { + "billingViews": { + "target": "com.amazonaws.billing#BillingViewList", + "traits": { + "smithy.api#documentation": "

A list of BillingViewListElement retrieved.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.billing#PageToken", + "traits": { + "smithy.api#documentation": "

The pagination token to use on subsequent calls to list billing views.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#ListSourceViewsForBillingView": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#ListSourceViewsForBillingViewRequest" + }, + "output": { + "target": "com.amazonaws.billing#ListSourceViewsForBillingViewResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the source views (managed Amazon Web Services billing views) associated with the billing view.\n

", + "smithy.api#examples": [ + { + "title": "Invoke ListSourceViewsForBillingView", + "input": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + }, + "output": { + "sourceViews": [ + "arn:aws:billing::123456789101:billingview/primary" + ] + } + } + ], + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "sourceViews" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.billing#ListSourceViewsForBillingViewRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.billing#BillingViewsMaxResults", + "traits": { + "smithy.api#documentation": "

\n The number of entries a paginated response contains.\n

" + } + }, + "nextToken": { + "target": "com.amazonaws.billing#PageToken", + "traits": { + "smithy.api#documentation": "

\n The pagination token that is used on subsequent calls to list billing views.\n

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#ListSourceViewsForBillingViewResponse": { + "type": "structure", + "members": { + "sourceViews": { + "target": "com.amazonaws.billing#BillingViewSourceViewsList", + "traits": { + "smithy.api#documentation": "

A list of billing views used as the data source for the custom billing view.\n

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.billing#PageToken", + "traits": { + "smithy.api#documentation": "

\n The pagination token that is used on subsequent calls to list billing views.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.billing#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists tags associated with the billing view resource.\n

", + "smithy.api#examples": [ + { + "title": "Invoke ListTagsForResource", + "input": { + "resourceArn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899" + }, + "output": { + "resourceTags": [ + { + "key": "ExampleTagKey", + "value": "ExampleTagValue" + } + ] + } + } + ], + "smithy.api#readonly": {} + } + }, + "com.amazonaws.billing#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.billing#ResourceArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the resource. \n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "resourceTags": { + "target": "com.amazonaws.billing#ResourceTagList", + "traits": { + "smithy.api#documentation": "

\n A list of tag key value pairs that are associated with the resource.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#PageToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2047 + } + } + }, + "com.amazonaws.billing#PolicyDocument": { + "type": "string" + }, + "com.amazonaws.billing#QuotaCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.billing#ResourceArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws[a-z-]*:(billing)::[0-9]{12}:[a-zA-Z0-9/:_\\+=\\.\\@-]{0,70}[a-zA-Z0-9]$" + } + }, + "com.amazonaws.billing#ResourceId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.billing#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.billing#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "com.amazonaws.billing#ResourceId", + "traits": { + "smithy.api#documentation": "

\n Value is the ID of the resource that was not found.\n

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "com.amazonaws.billing#ResourceType", + "traits": { + "smithy.api#documentation": "

\n Value is the type of resource that was not found.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "BillingResourceNotFound", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "

\n The specified ARN in the request doesn't exist.\n

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.billing#ResourceTag": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.billing#ResourceTagKey", + "traits": { + "smithy.api#documentation": "

\n The key that's associated with the tag.\n

", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.billing#ResourceTagValue", + "traits": { + "smithy.api#documentation": "

\n The value that's associated with the tag.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The tag structure that contains a tag key and value.\n

" + } + }, + "com.amazonaws.billing#ResourceTagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.billing#ResourceTagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#ResourceTagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.billing#ResourceTagList": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#ResourceTag" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.billing#ResourceTagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, + "com.amazonaws.billing#ResourceType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.billing#ServiceCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.billing#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.billing#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "com.amazonaws.billing#ResourceId", + "traits": { + "smithy.api#documentation": "

\n The ID of the resource.\n

", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "com.amazonaws.billing#ResourceType", + "traits": { + "smithy.api#documentation": "

\n The type of Amazon Web Services resource.\n

", + "smithy.api#required": {} + } + }, + "serviceCode": { + "target": "com.amazonaws.billing#ServiceCode", + "traits": { + "smithy.api#documentation": "

\n The container for the serviceCode.\n

", + "smithy.api#required": {} + } + }, + "quotaCode": { + "target": "com.amazonaws.billing#QuotaCode", + "traits": { + "smithy.api#documentation": "

\n The container for the quotaCode.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "BillingServiceQuotaExceeded", + "httpResponseCode": 402 + }, + "smithy.api#documentation": "

\n You've reached the limit of resources you can create, or exceeded the size of an individual resource.\n

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.billing#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#pattern": "^[\\S\\s]*$" + } + }, + "com.amazonaws.billing#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.billing#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

\n An API operation for adding one or more tags (key-value pairs) to a resource.\n

", + "smithy.api#examples": [ + { + "title": "Invoke TagResource", + "input": { + "resourceArn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "resourceTags": [ + { + "key": "ExampleTagKey", + "value": "ExampleTagValue" + } + ] + }, + "output": {} + } + ] + } + }, + "com.amazonaws.billing#TagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.billing#ResourceArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the resource.\n

", + "smithy.api#required": {} + } + }, + "resourceTags": { + "target": "com.amazonaws.billing#ResourceTagList", + "traits": { + "smithy.api#documentation": "

\n A list of tag key value pairs that are associated with the resource.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#TagValues": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.billing#TagKey", + "traits": { + "smithy.api#documentation": "

\n The key for the tag.\n

", + "smithy.api#required": {} + } + }, + "values": { + "target": "com.amazonaws.billing#Values", + "traits": { + "smithy.api#documentation": "

\n The specific value of the tag.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The values that are available for a tag.\n

" + } + }, + "com.amazonaws.billing#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.billing#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "BillingThrottling", + "httpResponseCode": 429 + }, + "smithy.api#documentation": "

The request was denied due to request throttling.\n

", + "smithy.api#error": "client", "smithy.api#httpError": 429 } }, + "com.amazonaws.billing#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.billing#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

\n Removes one or more tags from a resource. Specify only tag keys in your request. Don't specify the value.\n

", + "smithy.api#examples": [ + { + "title": "Invoke UntagResource", + "input": { + "resourceArn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "resourceTagKeys": [ + "ExampleTagKey" + ] + }, + "output": {} + } + ] + } + }, + "com.amazonaws.billing#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.billing#ResourceArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of the resource.\n

", + "smithy.api#required": {} + } + }, + "resourceTagKeys": { + "target": "com.amazonaws.billing#ResourceTagKeyList", + "traits": { + "smithy.api#documentation": "

\n A list of tag keys that are associated with the resource.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.billing#UpdateBillingView": { + "type": "operation", + "input": { + "target": "com.amazonaws.billing#UpdateBillingViewRequest" + }, + "output": { + "target": "com.amazonaws.billing#UpdateBillingViewResponse" + }, + "errors": [ + { + "target": "com.amazonaws.billing#AccessDeniedException" + }, + { + "target": "com.amazonaws.billing#ConflictException" + }, + { + "target": "com.amazonaws.billing#InternalServerException" + }, + { + "target": "com.amazonaws.billing#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.billing#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.billing#ThrottlingException" + }, + { + "target": "com.amazonaws.billing#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

An API to update the attributes of the billing view.\n

", + "smithy.api#examples": [ + { + "title": "Invoke UpdateBillingView", + "input": { + "name": "Example Custom Billing View", + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "description": "Custom Billing View Example -- updated description", + "dataFilterExpression": { + "dimensions": { + "key": "LINKED_ACCOUNT", + "values": [ + "000000000000" + ] + } + } + }, + "output": { + "arn": "arn:aws:billing::123456789101:billingview/custom-46f47cb2-a11d-43f3-983d-470b5708a899", + "updatedAt": 1719792001 + } + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.billing#UpdateBillingViewRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.billing#BillingViewName", + "traits": { + "smithy.api#documentation": "

\n The name of the billing view.\n

" + } + }, + "description": { + "target": "com.amazonaws.billing#BillingViewDescription", + "traits": { + "smithy.api#documentation": "

\n The description of the billing view.\n

" + } + }, + "dataFilterExpression": { + "target": "com.amazonaws.billing#Expression", + "traits": { + "smithy.api#documentation": "

See Expression. Billing view only supports LINKED_ACCOUNT and Tags.\n

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.billing#UpdateBillingViewResponse": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.billing#BillingViewArn", + "traits": { + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) that can be used to uniquely identify the billing view.\n

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

\n The time when the billing view was last updated.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.billing#ValidationException": { "type": "structure", "members": { @@ -913,6 +2172,28 @@ } } } + }, + "com.amazonaws.billing#Value": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#pattern": "^[\\S\\s]*$" + } + }, + "com.amazonaws.billing#Values": { + "type": "list", + "member": { + "target": "com.amazonaws.billing#Value" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + } + } } } } \ No newline at end of file diff --git a/models/budgets.json b/models/budgets.json index 0fea2c0505..abf52523d0 100644 --- a/models/budgets.json +++ b/models/budgets.json @@ -340,6 +340,108 @@ }, "type": "endpoint" }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://budgets.c2s.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-iso-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-b" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://budgets.global.sc2s.sgov.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-isob-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { @@ -864,6 +966,28 @@ 
"UseDualStack": false } }, + { + "documentation": "For region aws-iso-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://budgets.c2s.ic.gov" + } + }, + "params": { + "Region": "aws-iso-global", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -903,7 +1027,16 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://budgets.us-iso-east-1.c2s.ic.gov" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://budgets.c2s.ic.gov" } }, "params": { @@ -912,6 +1045,28 @@ "UseDualStack": false } }, + { + "documentation": "For region aws-iso-b-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://budgets.global.sc2s.sgov.gov" + } + }, + "params": { + "Region": "aws-iso-b-global", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -951,7 +1106,16 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://budgets.us-isob-east-1.sc2s.sgov.gov" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://budgets.global.sc2s.sgov.gov" } }, "params": { diff --git a/models/connect.json b/models/connect.json index b7d3ffc65e..0dd364d082 100644 --- a/models/connect.json +++ 
b/models/connect.json @@ -1630,6 +1630,9 @@ { "target": "com.amazonaws.connect#UpdateInstanceStorageConfig" }, + { + "target": "com.amazonaws.connect#UpdateParticipantAuthentication" + }, { "target": "com.amazonaws.connect#UpdateParticipantRoleConfig" }, @@ -4127,6 +4130,12 @@ "smithy.api#documentation": "

The proficiency level of the condition.

" } }, + "Range": { + "target": "com.amazonaws.connect#Range", + "traits": { + "smithy.api#documentation": "

An Object to define the minimum and maximum proficiency levels.

" + } + }, "MatchCriteria": { "target": "com.amazonaws.connect#MatchCriteria", "traits": { @@ -4224,6 +4233,28 @@ "smithy.api#default": 0 } }, + "com.amazonaws.connect#AuthenticationError": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^[\\x20-\\x21\\x23-\\x5B\\x5D-\\x7E]*$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.connect#AuthenticationErrorDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^[\\x20-\\x21\\x23-\\x5B\\x5D-\\x7E]*$", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.connect#AuthenticationProfile": { "type": "structure", "members": { @@ -4383,6 +4414,16 @@ "target": "com.amazonaws.connect#AuthenticationProfileSummary" } }, + "com.amazonaws.connect#AuthorizationCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.connect#AutoAccept": { "type": "boolean", "traits": { @@ -5656,6 +5697,12 @@ "smithy.api#documentation": "

Information about Amazon Connect Wisdom.

" } }, + "CustomerId": { + "target": "com.amazonaws.connect#CustomerId", + "traits": { + "smithy.api#documentation": "

The customer's identification number. For example, the CustomerId may be a\n customer number from your CRM. You can create a Lambda function to pull the unique customer ID of\n the caller from your CRM system. If you enable Amazon Connect Voice ID capability, this\n attribute is populated with the CustomerSpeakerId of the caller.

" + } + }, "CustomerEndpoint": { "target": "com.amazonaws.connect#EndpointInfo", "traits": { @@ -8640,7 +8687,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates registration for a device token and a chat contact to receive real-time push\n notifications. For more information about push notifications, see Set up push\n notifications in Amazon Connect for mobile chat in the Amazon Connect\n Administrator Guide.

", + "smithy.api#documentation": "

Creates registration for a device token and a chat contact to receive real-time push\n notifications. For more information about push notifications, see Set up push\n notifications in Amazon Connect for mobile chat in the Amazon Connect\n Administrator Guide.

", "smithy.api#http": { "method": "PUT", "uri": "/push-notification/{InstanceId}/registrations", @@ -10489,6 +10536,25 @@ "smithy.api#documentation": "

Information about the Customer on the contact.

" } }, + "com.amazonaws.connect#CustomerId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + } + } + }, + "com.amazonaws.connect#CustomerIdNonEmpty": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.connect#CustomerProfileAttributesSerialized": { "type": "string" }, @@ -10594,7 +10660,7 @@ "ComparisonType": { "target": "com.amazonaws.connect#DateComparisonType", "traits": { - "smithy.api#documentation": "

An object to specify the hours of operation override date condition\n comparisonType.

" + "smithy.api#documentation": "

An object to specify the hours of operation override date condition\n comparisonType.

" } } }, @@ -17624,6 +17690,9 @@ "traits": { "smithy.api#documentation": "

List of routing expressions which will be OR-ed together.

" } + }, + "NotAttributeCondition": { + "target": "com.amazonaws.connect#AttributeCondition" } }, "traits": { @@ -20957,6 +21026,12 @@ "traits": { "smithy.api#enumValue": "ENHANCED_CHAT_MONITORING" } + }, + "MULTI_PARTY_CHAT_CONFERENCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MULTI_PARTY_CHAT_CONFERENCE" + } } } }, @@ -21439,6 +21514,12 @@ "traits": { "smithy.api#enumValue": "CALL_TRANSFER_CONNECTOR" } + }, + "COGNITO_USER_POOL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COGNITO_USER_POOL" + } } } }, @@ -22331,7 +22412,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to returns both Amazon Lex V1 and V2 bots.

", + "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to return both Amazon Lex V1 and V2 bots.

", "smithy.api#http": { "method": "GET", "uri": "/instance/{InstanceId}/bots", @@ -31069,6 +31150,26 @@ } } }, + "com.amazonaws.connect#Range": { + "type": "structure", + "members": { + "MinProficiencyLevel": { + "target": "com.amazonaws.connect#NullableProficiencyLevel", + "traits": { + "smithy.api#documentation": "

The minimum proficiency level of the range.

" + } + }, + "MaxProficiencyLevel": { + "target": "com.amazonaws.connect#NullableProficiencyLevel", + "traits": { + "smithy.api#documentation": "

The maximum proficiency level of the range.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An Object to define the minimum and maximum proficiency levels.

" + } + }, "com.amazonaws.connect#ReadOnlyFieldInfo": { "type": "structure", "members": { @@ -36808,7 +36909,7 @@ "UploadUrlMetadata": { "target": "com.amazonaws.connect#UploadUrlMetadata", "traits": { - "smithy.api#documentation": "

Information to be used while uploading the attached file.

" + "smithy.api#documentation": "

The headers to be provided while uploading the file to the URL.

" } } }, @@ -36923,6 +37024,12 @@ "traits": { "smithy.api#documentation": "

A set of system defined key-value pairs stored on individual contact segments using an\n attribute map. The attributes are standard Amazon Connect attributes. They can be accessed in\n flows.

\n

Attribute keys can include only alphanumeric, -, and _.

\n

This field can be used to show channel subtype, such as connect:Guide.

\n \n

The types application/vnd.amazonaws.connect.message.interactive and\n application/vnd.amazonaws.connect.message.interactive.response must be present in\n the SupportedMessagingContentTypes field of this API in order to set\n SegmentAttributes as { \"connect:Subtype\": {\"valueString\" : \"connect:Guide\"\n }}.

\n
" } + }, + "CustomerId": { + "target": "com.amazonaws.connect#CustomerIdNonEmpty", + "traits": { + "smithy.api#documentation": "

The customer's identification number. For example, the CustomerId may be a\n customer number from your CRM.

" + } } }, "traits": { @@ -41814,6 +41921,90 @@ "smithy.api#input": {} } }, + "com.amazonaws.connect#UpdateParticipantAuthentication": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#UpdateParticipantAuthenticationRequest" + }, + "output": { + "target": "com.amazonaws.connect#UpdateParticipantAuthenticationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#AccessDeniedException" + }, + { + "target": "com.amazonaws.connect#ConflictException" + }, + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Instructs Amazon Connect to resume the authentication process. The subsequent actions\n depend on the request body contents:

\n
    \n
  • \n

    \n If a code is provided: Connect retrieves the identity\n information from Amazon Cognito and imports it into Connect Customer Profiles.

    \n
  • \n
  • \n

    \n If an error is provided: The error branch of the\n Authenticate Customer block is executed.

    \n
  • \n
\n \n

The API returns a success response to acknowledge the request. However, the interaction and\n exchange of identity information occur asynchronously after the response is returned.

\n
", + "smithy.api#http": { + "method": "POST", + "uri": "/contact/update-participant-authentication", + "code": 200 + } + } + }, + "com.amazonaws.connect#UpdateParticipantAuthenticationRequest": { + "type": "structure", + "members": { + "State": { + "target": "com.amazonaws.connect#ParticipantToken", + "traits": { + "smithy.api#documentation": "

The state query parameter that was provided by Cognito in the\n redirectUri. This will also match the state parameter provided in the\n AuthenticationUrl from the GetAuthenticationUrl\n response.

", + "smithy.api#required": {} + } + }, + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "smithy.api#required": {} + } + }, + "Code": { + "target": "com.amazonaws.connect#AuthorizationCode", + "traits": { + "smithy.api#documentation": "

The code query parameter provided by Cognito in the\n redirectUri.

" + } + }, + "Error": { + "target": "com.amazonaws.connect#AuthenticationError", + "traits": { + "smithy.api#documentation": "

The error query parameter provided by Cognito in the\n redirectUri.

" + } + }, + "ErrorDescription": { + "target": "com.amazonaws.connect#AuthenticationErrorDescription", + "traits": { + "smithy.api#documentation": "

The error_description parameter provided by Cognito in the\n redirectUri.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#UpdateParticipantAuthenticationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#UpdateParticipantRoleConfig": { "type": "operation", "input": { @@ -45890,7 +46081,7 @@ "IvrRecordingTrack": { "target": "com.amazonaws.connect#IvrRecordingTrack", "traits": { - "smithy.api#documentation": "

Identifies which IVR track is being recorded.

" + "smithy.api#documentation": "

Identifies which IVR track is being recorded.

\n

One and only one of the track configurations should be presented in the request.

" } } }, diff --git a/models/connectparticipant.json b/models/connectparticipant.json index 2992b78527..809c6b5ce8 100644 --- a/models/connectparticipant.json +++ b/models/connectparticipant.json @@ -52,6 +52,9 @@ "type": "service", "version": "2018-09-07", "operations": [ + { + "target": "com.amazonaws.connectparticipant#CancelParticipantAuthentication" + }, { "target": "com.amazonaws.connectparticipant#CompleteAttachmentUpload" }, @@ -67,6 +70,9 @@ { "target": "com.amazonaws.connectparticipant#GetAttachment" }, + { + "target": "com.amazonaws.connectparticipant#GetAuthenticationUrl" + }, { "target": "com.amazonaws.connectparticipant#GetTranscript" }, @@ -92,7 +98,7 @@ "name": "execute-api" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

Amazon Connect is an easy-to-use omnichannel cloud contact center service that\n enables companies of any size to deliver superior customer service at a lower cost.\n Amazon Connect communications capabilities make it easy for companies to deliver\n personalized interactions across communication channels, including chat.

\n

Use the Amazon Connect Participant Service to manage participants (for example,\n agents, customers, and managers listening in), and to send messages and events within a\n chat contact. The APIs in the service enable the following: sending chat messages,\n attachment sharing, managing a participant's connection state and message events, and\n retrieving chat transcripts.

", + "smithy.api#documentation": "\n

Amazon Connect is an easy-to-use omnichannel cloud contact center service that\n enables companies of any size to deliver superior customer service at a lower cost.\n Amazon Connect communications capabilities make it easy for companies to deliver\n personalized interactions across communication channels, including chat.

\n

Use the Amazon Connect Participant Service to manage participants (for example,\n agents, customers, and managers listening in), and to send messages and events within a\n chat contact. The APIs in the service enable the following: sending chat messages,\n attachment sharing, managing a participant's connection state and message events, and\n retrieving chat transcripts.

", "smithy.api#title": "Amazon Connect Participant Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -875,9 +881,79 @@ "target": "com.amazonaws.connectparticipant#AttachmentItem" } }, + "com.amazonaws.connectparticipant#AuthenticationUrl": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2083 + } + } + }, "com.amazonaws.connectparticipant#Bool": { "type": "boolean" }, + "com.amazonaws.connectparticipant#CancelParticipantAuthentication": { + "type": "operation", + "input": { + "target": "com.amazonaws.connectparticipant#CancelParticipantAuthenticationRequest" + }, + "output": { + "target": "com.amazonaws.connectparticipant#CancelParticipantAuthenticationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connectparticipant#AccessDeniedException" + }, + { + "target": "com.amazonaws.connectparticipant#InternalServerException" + }, + { + "target": "com.amazonaws.connectparticipant#ThrottlingException" + }, + { + "target": "com.amazonaws.connectparticipant#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Cancels the authentication session. The opted out branch of the Authenticate Customer\n flow block will be taken.

\n \n

The currently supported channel is chat. This API is not supported for Apple\n Messages for Business, WhatsApp, or SMS chats.

\n
", + "smithy.api#http": { + "method": "POST", + "uri": "/participant/cancel-authentication", + "code": 200 + } + } + }, + "com.amazonaws.connectparticipant#CancelParticipantAuthenticationRequest": { + "type": "structure", + "members": { + "SessionId": { + "target": "com.amazonaws.connectparticipant#SessionId", + "traits": { + "smithy.api#documentation": "

The sessionId provided in the authenticationInitiated\n event.

", + "smithy.api#required": {} + } + }, + "ConnectionToken": { + "target": "com.amazonaws.connectparticipant#ParticipantToken", + "traits": { + "smithy.api#documentation": "

The authentication token associated with the participant's connection.

", + "smithy.api#httpHeader": "X-Amz-Bearer", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connectparticipant#CancelParticipantAuthenticationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connectparticipant#ChatContent": { "type": "string", "traits": { @@ -1020,7 +1096,7 @@ } ], "traits": { - "smithy.api#documentation": "

Allows you to confirm that the attachment has been uploaded using the pre-signed URL\n provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment\n with that identifier is already being uploaded.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Allows you to confirm that the attachment has been uploaded using the pre-signed URL\n provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment\n with that identifier is already being uploaded.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/complete-attachment-upload", @@ -1077,7 +1153,7 @@ } }, "traits": { - "smithy.api#documentation": "

The requested operation conflicts with the current state of a service\n resource associated with the request.

", + "smithy.api#documentation": "

The requested operation conflicts with the current state of a service resource\n associated with the request.

", "smithy.api#error": "client", "smithy.api#httpError": 409 } @@ -1171,7 +1247,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates the participant's connection.

\n \n

\n ParticipantToken is used for invoking this API instead of\n ConnectionToken.

\n
\n

The participant token is valid for the lifetime of the participant – until they are\n part of a contact.

\n

The response URL for WEBSOCKET Type has a connect expiry timeout of 100s.\n Clients must manually connect to the returned websocket URL and subscribe to the desired\n topic.

\n

For chat, you need to publish the following on the established websocket\n connection:

\n

\n {\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}\n

\n

Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter,\n clients need to call this API again to obtain a new websocket URL and perform the same\n steps as before.

\n

\n Message streaming support: This API can also be used\n together with the StartContactStreaming API to create a participant connection for chat\n contacts that are not using a websocket. For more information about message streaming,\n Enable real-time chat\n message streaming in the Amazon Connect Administrator\n Guide.

\n

\n Feature specifications: For information about feature\n specifications, such as the allowed number of open websocket connections per\n participant, see Feature specifications in the Amazon Connect Administrator\n Guide.

\n \n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

\n
", + "smithy.api#documentation": "

Creates the participant's connection.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ParticipantToken is used for invoking this API instead of\n ConnectionToken.

\n
\n

The participant token is valid for the lifetime of the participant – until they are\n part of a contact.

\n

The response URL for WEBSOCKET Type has a connect expiry timeout of 100s.\n Clients must manually connect to the returned websocket URL and subscribe to the desired\n topic.

\n

For chat, you need to publish the following on the established websocket\n connection:

\n

\n {\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}\n

\n

Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter,\n clients need to call this API again to obtain a new websocket URL and perform the same\n steps as before.

\n

\n Message streaming support: This API can also be used\n together with the StartContactStreaming API to create a participant connection for chat\n contacts that are not using a websocket. For more information about message streaming,\n Enable real-time chat\n message streaming in the Amazon Connect Administrator\n Guide.

\n

\n Feature specifications: For information about feature\n specifications, such as the allowed number of open websocket connections per\n participant, see Feature specifications in the Amazon Connect Administrator\n Guide.

\n \n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

\n
", "smithy.api#http": { "method": "POST", "uri": "/participant/connection", @@ -1253,7 +1329,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the view for the specified view token.

", + "smithy.api#documentation": "

Retrieves the view for the specified view token.

\n

For security recommendations, see Amazon Connect Chat security best practices.

", "smithy.api#http": { "method": "GET", "uri": "/participant/views/{ViewToken}", @@ -1322,7 +1398,7 @@ } ], "traits": { - "smithy.api#documentation": "

Disconnects a participant.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Disconnects a participant.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/disconnect", @@ -1392,7 +1468,7 @@ } ], "traits": { - "smithy.api#documentation": "

Provides a pre-signed URL for download of a completed attachment. This is an\n asynchronous API for use with active contacts.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Provides a pre-signed URL for download of a completed attachment. This is an\n asynchronous API for use with active contacts.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/attachment", @@ -1417,6 +1493,12 @@ "smithy.api#httpHeader": "X-Amz-Bearer", "smithy.api#required": {} } + }, + "UrlExpiryInSeconds": { + "target": "com.amazonaws.connectparticipant#URLExpiryInSeconds", + "traits": { + "smithy.api#documentation": "

The expiration time of the URL, in seconds. Valid range: between 5 and 300\n seconds.

" + } } }, "traits": { @@ -1437,6 +1519,89 @@ "traits": { "smithy.api#documentation": "

The expiration time of the URL in ISO timestamp. It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.

" } + }, + "AttachmentSizeInBytes": { + "target": "com.amazonaws.connectparticipant#AttachmentSizeInBytes", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

The size of the attachment in bytes.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.connectparticipant#GetAuthenticationUrl": { + "type": "operation", + "input": { + "target": "com.amazonaws.connectparticipant#GetAuthenticationUrlRequest" + }, + "output": { + "target": "com.amazonaws.connectparticipant#GetAuthenticationUrlResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connectparticipant#AccessDeniedException" + }, + { + "target": "com.amazonaws.connectparticipant#InternalServerException" + }, + { + "target": "com.amazonaws.connectparticipant#ThrottlingException" + }, + { + "target": "com.amazonaws.connectparticipant#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the AuthenticationUrl for the current authentication session for the\n AuthenticateCustomer flow block.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n
    \n
  • \n

    This API can only be called within one minute of receiving the\n authenticationInitiated event.

    \n
  • \n
  • \n

The currently supported channel is chat. This API is not supported for Apple\n Messages for Business, WhatsApp, or SMS chats.

    \n
  • \n
\n
", + "smithy.api#http": { + "method": "POST", + "uri": "/participant/authentication-url", + "code": 200 + } + } + }, + "com.amazonaws.connectparticipant#GetAuthenticationUrlRequest": { + "type": "structure", + "members": { + "SessionId": { + "target": "com.amazonaws.connectparticipant#SessionId", + "traits": { + "smithy.api#documentation": "

The sessionId provided in the authenticationInitiated event.

", + "smithy.api#required": {} + } + }, + "RedirectUri": { + "target": "com.amazonaws.connectparticipant#RedirectURI", + "traits": { + "smithy.api#documentation": "

The URL where the customer will be redirected after Amazon Cognito authorizes the\n user.

", + "smithy.api#required": {} + } + }, + "ConnectionToken": { + "target": "com.amazonaws.connectparticipant#ParticipantToken", + "traits": { + "smithy.api#documentation": "

The authentication token associated with the participant's connection.

", + "smithy.api#httpHeader": "X-Amz-Bearer", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connectparticipant#GetAuthenticationUrlResponse": { + "type": "structure", + "members": { + "AuthenticationUrl": { + "target": "com.amazonaws.connectparticipant#AuthenticationUrl", + "traits": { + "smithy.api#documentation": "

The URL where the customer will sign in to the identity provider. This URL contains\n the authorize endpoint for the Cognito UserPool used in the authentication.

" + } } }, "traits": { @@ -1466,7 +1631,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves a transcript of the session, including details about any attachments. For\n information about accessing past chat contact transcripts for a persistent chat, see\n Enable persistent chat.

\n

If you have a process that consumes events in the transcript of an chat that has ended, note that chat\n transcripts contain the following event content types if the event has occurred\n during the chat session:

\n
    \n
  • \n

    \n application/vnd.amazonaws.connect.event.participant.left\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.participant.joined\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.chat.ended\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.transfer.succeeded\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.transfer.failed\n

    \n
  • \n
\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Retrieves a transcript of the session, including details about any attachments. For\n information about accessing past chat contact transcripts for a persistent chat, see\n Enable persistent chat.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n

If you have a process that consumes events in the transcript of a chat that has\n ended, note that chat transcripts contain the following event content types if the event\n has occurred during the chat session:

\n
    \n
  • \n

    \n application/vnd.amazonaws.connect.event.participant.left\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.participant.joined\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.chat.ended\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.transfer.succeeded\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.event.transfer.failed\n

    \n
  • \n
\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/transcript", @@ -1839,6 +2004,15 @@ "target": "com.amazonaws.connectparticipant#Receipt" } }, + "com.amazonaws.connectparticipant#RedirectURI": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, "com.amazonaws.connectparticipant#ResourceId": { "type": "string" }, @@ -1963,7 +2137,7 @@ } ], "traits": { - "smithy.api#documentation": "\n

The application/vnd.amazonaws.connect.event.connection.acknowledged\n ContentType will no longer be supported starting December 31, 2024. This event has\n been migrated to the CreateParticipantConnection API using the\n ConnectParticipant field.

\n
\n

Sends an event. Message receipts are not supported when there are more than two active\n participants in the chat. Using the SendEvent API for message receipts when a supervisor\n is barged-in will result in a conflict exception.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "\n

The application/vnd.amazonaws.connect.event.connection.acknowledged\n ContentType will no longer be supported starting December 31, 2024. This event has\n been migrated to the CreateParticipantConnection API using the\n ConnectParticipant field.

\n
\n

Sends an event. Message receipts are not supported when there are more than two active\n participants in the chat. Using the SendEvent API for message receipts when a supervisor\n is barged-in will result in a conflict exception.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/event", @@ -2050,7 +2224,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sends a message.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Sends a message.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/message", @@ -2131,6 +2305,15 @@ "smithy.api#httpError": 402 } }, + "com.amazonaws.connectparticipant#SessionId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + } + } + }, "com.amazonaws.connectparticipant#SortKey": { "type": "enum", "members": { @@ -2174,7 +2357,7 @@ } ], "traits": { - "smithy.api#documentation": "

Provides a pre-signed Amazon S3 URL in response for uploading the file directly to\n S3.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", + "smithy.api#documentation": "

Provides a pre-signed Amazon S3 URL in response for uploading the file directly to\n S3.

\n

For security recommendations, see Amazon Connect Chat security best practices.

\n \n

\n ConnectionToken is used for invoking this API instead of\n ParticipantToken.

\n
\n

The Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.

", "smithy.api#http": { "method": "POST", "uri": "/participant/start-attachment-upload", @@ -2240,7 +2423,7 @@ "UploadMetadata": { "target": "com.amazonaws.connectparticipant#UploadMetadata", "traits": { - "smithy.api#documentation": "

Fields to be used while uploading the attachment.

" + "smithy.api#documentation": "

The headers to be provided while uploading the file to the URL.

" } } }, @@ -2297,6 +2480,15 @@ "target": "com.amazonaws.connectparticipant#Item" } }, + "com.amazonaws.connectparticipant#URLExpiryInSeconds": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 5, + "max": 300 + } + } + }, "com.amazonaws.connectparticipant#UploadMetadata": { "type": "structure", "members": { diff --git a/models/cost-explorer.json b/models/cost-explorer.json index f85925cd1f..952662ae11 100644 --- a/models/cost-explorer.json +++ b/models/cost-explorer.json @@ -1529,6 +1529,16 @@ "smithy.api#error": "client" } }, + "com.amazonaws.costexplorer#BillingViewArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws[a-z-]*:(billing)::[0-9]{12}:billingview/[-a-zA-Z0-9/:_+=.-@]{1,43}$" + } + }, "com.amazonaws.costexplorer#CommitmentPurchaseAnalysisConfiguration": { "type": "structure", "members": { @@ -4288,6 +4298,9 @@ }, { "target": "com.amazonaws.costexplorer#RequestChangedException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -4330,6 +4343,12 @@ "smithy.api#documentation": "

You can group Amazon Web Services costs using up to two different groups, either\n dimensions, tag keys, cost categories, or any two group by types.

\n

Valid values for the DIMENSION type are AZ,\n INSTANCE_TYPE, LEGAL_ENTITY_NAME, INVOICING_ENTITY,\n LINKED_ACCOUNT, OPERATION, PLATFORM,\n PURCHASE_TYPE, SERVICE, TENANCY,\n RECORD_TYPE, and USAGE_TYPE.

\n

When you group by the TAG type and include a valid tag key, you get all\n tag values, including empty strings.

" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { @@ -4396,6 +4415,9 @@ }, { "target": "com.amazonaws.costexplorer#RequestChangedException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -4438,6 +4460,12 @@ "smithy.api#documentation": "

You can group Amazon Web Services costs using up to two different groups:\n DIMENSION, TAG, COST_CATEGORY.

" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "NextPageToken": { "target": "com.amazonaws.costexplorer#NextPageToken", "traits": { @@ -4504,6 +4532,9 @@ }, { "target": "com.amazonaws.costexplorer#RequestChangedException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -4537,6 +4568,12 @@ "smithy.api#documentation": "

The value that you sort the data by.

\n

The key represents the cost and usage metrics. The following values are supported:

\n
    \n
  • \n

    \n BlendedCost\n

    \n
  • \n
  • \n

    \n UnblendedCost\n

    \n
  • \n
  • \n

    \n AmortizedCost\n

    \n
  • \n
  • \n

    \n NetAmortizedCost\n

    \n
  • \n
  • \n

    \n NetUnblendedCost\n

    \n
  • \n
  • \n

    \n UsageQuantity\n

    \n
  • \n
  • \n

    \n NormalizedUsageAmount\n

    \n
  • \n
\n

The supported key values for the SortOrder value are ASCENDING\n and DESCENDING.

\n

When you use the SortBy value, the NextPageToken and\n SearchString key values aren't supported.

" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "MaxResults": { "target": "com.amazonaws.costexplorer#MaxResults", "traits": { @@ -4608,6 +4645,9 @@ }, { "target": "com.amazonaws.costexplorer#LimitExceededException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -4644,6 +4684,12 @@ "smithy.api#documentation": "

The filters that you want to use to filter your forecast. The\n GetCostForecast API supports filtering by the following dimensions:

\n
    \n
  • \n

    \n AZ\n

    \n
  • \n
  • \n

    \n INSTANCE_TYPE\n

    \n
  • \n
  • \n

    \n LINKED_ACCOUNT\n

    \n
  • \n
  • \n

    \n LINKED_ACCOUNT_NAME\n

    \n
  • \n
  • \n

    \n OPERATION\n

    \n
  • \n
  • \n

    \n PURCHASE_TYPE\n

    \n
  • \n
  • \n

    \n REGION\n

    \n
  • \n
  • \n

    \n SERVICE\n

    \n
  • \n
  • \n

    \n USAGE_TYPE\n

    \n
  • \n
  • \n

    \n USAGE_TYPE_GROUP\n

    \n
  • \n
  • \n

    \n RECORD_TYPE\n

    \n
  • \n
  • \n

    \n OPERATING_SYSTEM\n

    \n
  • \n
  • \n

    \n TENANCY\n

    \n
  • \n
  • \n

    \n SCOPE\n

    \n
  • \n
  • \n

    \n PLATFORM\n

    \n
  • \n
  • \n

    \n SUBSCRIPTION_ID\n

    \n
  • \n
  • \n

    \n LEGAL_ENTITY_NAME\n

    \n
  • \n
  • \n

    \n DEPLOYMENT_OPTION\n

    \n
  • \n
  • \n

    \n DATABASE_ENGINE\n

    \n
  • \n
  • \n

    \n INSTANCE_TYPE_FAMILY\n

    \n
  • \n
  • \n

    \n BILLING_ENTITY\n

    \n
  • \n
  • \n

    \n RESERVATION_ID\n

    \n
  • \n
  • \n

    \n SAVINGS_PLAN_ARN\n

    \n
  • \n
" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "PredictionIntervalLevel": { "target": "com.amazonaws.costexplorer#PredictionIntervalLevel", "traits": { @@ -4698,6 +4744,9 @@ }, { "target": "com.amazonaws.costexplorer#RequestChangedException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -4742,6 +4791,12 @@ "smithy.api#documentation": "

The value that you want to sort the data by.

\n

The key represents cost and usage metrics. The following values are supported:

\n
    \n
  • \n

    \n BlendedCost\n

    \n
  • \n
  • \n

    \n UnblendedCost\n

    \n
  • \n
  • \n

    \n AmortizedCost\n

    \n
  • \n
  • \n

    \n NetAmortizedCost\n

    \n
  • \n
  • \n

    \n NetUnblendedCost\n

    \n
  • \n
  • \n

    \n UsageQuantity\n

    \n
  • \n
  • \n

    \n NormalizedUsageAmount\n

    \n
  • \n
\n

The supported values for the SortOrder key are ASCENDING or\n DESCENDING.

\n

When you specify a SortBy paramater, the context must be\n COST_AND_USAGE. Further, when using SortBy,\n NextPageToken and SearchString aren't supported.

" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "MaxResults": { "target": "com.amazonaws.costexplorer#MaxResults", "traits": { @@ -5685,6 +5740,9 @@ }, { "target": "com.amazonaws.costexplorer#RequestChangedException" + }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" } ], "traits": { @@ -5722,6 +5780,12 @@ "smithy.api#documentation": "

The value that you want to sort the data by.

\n

The key represents cost and usage metrics. The following values are supported:

\n
    \n
  • \n

    \n BlendedCost\n

    \n
  • \n
  • \n

    \n UnblendedCost\n

    \n
  • \n
  • \n

    \n AmortizedCost\n

    \n
  • \n
  • \n

    \n NetAmortizedCost\n

    \n
  • \n
  • \n

    \n NetUnblendedCost\n

    \n
  • \n
  • \n

    \n UsageQuantity\n

    \n
  • \n
  • \n

    \n NormalizedUsageAmount\n

    \n
  • \n
\n

The supported values for SortOrder are ASCENDING and\n DESCENDING.

\n

When you use SortBy, NextPageToken and SearchString\n aren't supported.

" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "MaxResults": { "target": "com.amazonaws.costexplorer#MaxResults", "traits": { @@ -5789,6 +5853,9 @@ { "target": "com.amazonaws.costexplorer#LimitExceededException" }, + { + "target": "com.amazonaws.costexplorer#ResourceNotFoundException" + }, { "target": "com.amazonaws.costexplorer#UnresolvableUsageUnitException" } @@ -5827,6 +5894,12 @@ "smithy.api#documentation": "

The filters that you want to use to filter your forecast. The\n GetUsageForecast API supports filtering by the following dimensions:

\n
    \n
  • \n

    \n AZ\n

    \n
  • \n
  • \n

    \n INSTANCE_TYPE\n

    \n
  • \n
  • \n

    \n LINKED_ACCOUNT\n

    \n
  • \n
  • \n

    \n LINKED_ACCOUNT_NAME\n

    \n
  • \n
  • \n

    \n OPERATION\n

    \n
  • \n
  • \n

    \n PURCHASE_TYPE\n

    \n
  • \n
  • \n

    \n REGION\n

    \n
  • \n
  • \n

    \n SERVICE\n

    \n
  • \n
  • \n

    \n USAGE_TYPE\n

    \n
  • \n
  • \n

    \n USAGE_TYPE_GROUP\n

    \n
  • \n
  • \n

    \n RECORD_TYPE\n

    \n
  • \n
  • \n

    \n OPERATING_SYSTEM\n

    \n
  • \n
  • \n

    \n TENANCY\n

    \n
  • \n
  • \n

    \n SCOPE\n

    \n
  • \n
  • \n

    \n PLATFORM\n

    \n
  • \n
  • \n

    \n SUBSCRIPTION_ID\n

    \n
  • \n
  • \n

    \n LEGAL_ENTITY_NAME\n

    \n
  • \n
  • \n

    \n DEPLOYMENT_OPTION\n

    \n
  • \n
  • \n

    \n DATABASE_ENGINE\n

    \n
  • \n
  • \n

    \n INSTANCE_TYPE_FAMILY\n

    \n
  • \n
  • \n

    \n BILLING_ENTITY\n

    \n
  • \n
  • \n

    \n RESERVATION_ID\n

    \n
  • \n
  • \n

    \n SAVINGS_PLAN_ARN\n

    \n
  • \n
" } }, + "BillingViewArn": { + "target": "com.amazonaws.costexplorer#BillingViewArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN\n is used to specify which particular billing view you want to interact with or retrieve\n information from when making API calls related to Amazon Web Services Billing and Cost\n Management features. The BillingViewArn can be retrieved by calling the ListBillingViews\n API.

" + } + }, "PredictionIntervalLevel": { "target": "com.amazonaws.costexplorer#PredictionIntervalLevel", "traits": { diff --git a/models/datasync.json b/models/datasync.json index 4f914e3337..a789c12628 100644 --- a/models/datasync.json +++ b/models/datasync.json @@ -626,7 +626,7 @@ "Subdirectory": { "target": "com.amazonaws.datasync#EfsSubdirectory", "traits": { - "smithy.api#documentation": "

Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location)\n on your file system.

\n

By default, DataSync uses the root directory (or access point if you provide one by using\n AccessPointArn). You can also include subdirectories using forward slashes (for\n example, /path/to/folder).

" + "smithy.api#documentation": "

Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location).

\n

By default, DataSync uses the root directory (or access point if you provide one by using\n AccessPointArn). You can also include subdirectories using forward slashes (for\n example, /path/to/folder).

" } }, "EfsFilesystemArn": { @@ -714,27 +714,27 @@ "FsxFilesystemArn": { "target": "com.amazonaws.datasync#FsxFilesystemArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the FSx for Lustre file system.

", + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the FSx for Lustre file system.

", "smithy.api#required": {} } }, "SecurityGroupArns": { "target": "com.amazonaws.datasync#Ec2SecurityGroupArnList", "traits": { - "smithy.api#documentation": "

The Amazon Resource Names (ARNs) of the security groups that are used to configure the\n FSx for Lustre file system.

", + "smithy.api#documentation": "

Specifies the Amazon Resource Names (ARNs) of up to five security groups that provide access to your\n FSx for Lustre file system.

\n

The security groups must be able to access the file system's ports. The file system must\n also allow access from the security groups. For information about file system access, see the\n \n Amazon FSx for Lustre User Guide\n .

", "smithy.api#required": {} } }, "Subdirectory": { "target": "com.amazonaws.datasync#FsxLustreSubdirectory", "traits": { - "smithy.api#documentation": "

A subdirectory in the location's path. This subdirectory in the FSx for Lustre\n file system is used to read data from the FSx for Lustre source location or write\n data to the FSx for Lustre destination.

" + "smithy.api#documentation": "

Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories.

\n

When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory (/).

" } }, "Tags": { "target": "com.amazonaws.datasync#InputTagList", "traits": { - "smithy.api#documentation": "

The key-value pair that represents a tag that you want to add to the resource. The value\n can be an empty string. This value helps you manage, filter, and search for your resources. We\n recommend that you create a name tag for your location.

" + "smithy.api#documentation": "

Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location.

" } } }, @@ -748,7 +748,7 @@ "LocationArn": { "target": "com.amazonaws.datasync#LocationArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the FSx for Lustre file system location that's\n created.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the FSx for Lustre file system location that\n you created.

" } } }, @@ -802,7 +802,7 @@ "Subdirectory": { "target": "com.amazonaws.datasync#FsxOntapSubdirectory", "traits": { - "smithy.api#documentation": "

Specifies a path to the file share in the SVM where you'll copy your data.

\n

You can specify a junction path (also known as a mount point), qtree path (for NFS file\n shares), or share name (for SMB file shares). For example, your mount path might be\n /vol1, /vol1/tree1, or /share1.

\n \n

Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide.

\n
" + "smithy.api#documentation": "

Specifies a path to the file share in the SVM where you want to transfer data to or from.

\n

You can specify a junction path (also known as a mount point), qtree path (for NFS file\n shares), or share name (for SMB file shares). For example, your mount path might be\n /vol1, /vol1/tree1, or /share1.

\n \n

Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide.

\n
" } }, "Tags": { @@ -964,7 +964,7 @@ "Domain": { "target": "com.amazonaws.datasync#SmbDomain", "traits": { - "smithy.api#documentation": "

Specifies the name of the Microsoft Active Directory domain that the FSx for Windows File Server file system belongs to.

\n

If you have multiple Active Directory domains in your environment, configuring this\n parameter makes sure that DataSync connects to the right file system.

" + "smithy.api#documentation": "

Specifies the name of the Windows domain that the FSx for Windows File Server file system belongs to.

\n

If you have multiple Active Directory domains in your environment, configuring this\n parameter makes sure that DataSync connects to the right file system.

" } }, "Password": { @@ -1133,7 +1133,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a transfer location for a Network File System (NFS) file\n server. DataSync can use this location as a source or destination for\n transferring data.

\n

Before you begin, make sure that you understand how DataSync\n accesses\n NFS file servers.

\n \n

If you're copying data to or from an Snowcone device, you can also use\n CreateLocationNfs to create your transfer location. For more information, see\n Configuring transfers with Snowcone.

\n
" + "smithy.api#documentation": "

Creates a transfer location for a Network File System (NFS) file\n server. DataSync can use this location as a source or destination for\n transferring data.

\n

Before you begin, make sure that you understand how DataSync\n accesses\n NFS file servers.

" } }, "com.amazonaws.datasync#CreateLocationNfsRequest": { @@ -4112,6 +4112,21 @@ { "target": "com.amazonaws.datasync#UpdateLocationAzureBlob" }, + { + "target": "com.amazonaws.datasync#UpdateLocationEfs" + }, + { + "target": "com.amazonaws.datasync#UpdateLocationFsxLustre" + }, + { + "target": "com.amazonaws.datasync#UpdateLocationFsxOntap" + }, + { + "target": "com.amazonaws.datasync#UpdateLocationFsxOpenZfs" + }, + { + "target": "com.amazonaws.datasync#UpdateLocationFsxWindows" + }, { "target": "com.amazonaws.datasync#UpdateLocationHdfs" }, @@ -4121,6 +4136,9 @@ { "target": "com.amazonaws.datasync#UpdateLocationObjectStorage" }, + { + "target": "com.amazonaws.datasync#UpdateLocationS3" + }, { "target": "com.amazonaws.datasync#UpdateLocationSmb" }, @@ -5197,7 +5215,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the Network File System (NFS) protocol configuration that DataSync\n uses to access your Amazon FSx for OpenZFS or Amazon FSx for NetApp ONTAP file\n system.

" + "smithy.api#documentation": "

Specifies the Network File System (NFS) protocol configuration that DataSync\n uses to access your FSx for OpenZFS file system or FSx for ONTAP file\n system's storage virtual machine (SVM).

" } }, "com.amazonaws.datasync#FsxProtocolSmb": { @@ -5206,7 +5224,7 @@ "Domain": { "target": "com.amazonaws.datasync#SmbDomain", "traits": { - "smithy.api#documentation": "

Specifies the fully qualified domain name (FQDN) of the Microsoft Active Directory that\n your storage virtual machine (SVM) belongs to.

\n

If you have multiple domains in your environment, configuring this setting makes sure that\n DataSync connects to the right SVM.

" + "smithy.api#documentation": "

Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to.

\n

If you have multiple domains in your environment, configuring this setting makes sure that\n DataSync connects to the right SVM.

\n

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM.

" } }, "MountOptions": { @@ -5228,7 +5246,63 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your Amazon FSx for NetApp ONTAP file system. For more information, see\n Accessing FSx for ONTAP file systems.

" + "smithy.api#documentation": "

Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your Amazon FSx for NetApp ONTAP file system's storage virtual machine (SVM). For more information, see\n Providing DataSync access to FSx for ONTAP file systems.

" + } + }, + "com.amazonaws.datasync#FsxUpdateProtocol": { + "type": "structure", + "members": { + "NFS": { + "target": "com.amazonaws.datasync#FsxProtocolNfs" + }, + "SMB": { + "target": "com.amazonaws.datasync#FsxUpdateProtocolSmb", + "traits": { + "smithy.api#documentation": "

Specifies the Server Message Block (SMB) protocol configuration that DataSync\n uses to access your FSx for ONTAP file system's storage virtual machine (SVM).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the data transfer protocol that DataSync uses to access your\n Amazon FSx file system.

\n \n

You can't update the Network File System (NFS) protocol configuration for FSx for ONTAP locations. DataSync currently only supports NFS version 3 with this location type.

\n
" + } + }, + "com.amazonaws.datasync#FsxUpdateProtocolSmb": { + "type": "structure", + "members": { + "Domain": { + "target": "com.amazonaws.datasync#FsxUpdateSmbDomain", + "traits": { + "smithy.api#documentation": "

Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to.

\n

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM.

" + } + }, + "MountOptions": { + "target": "com.amazonaws.datasync#SmbMountOptions" + }, + "Password": { + "target": "com.amazonaws.datasync#SmbPassword", + "traits": { + "smithy.api#documentation": "

Specifies the password of a user who has permission to access your SVM.

" + } + }, + "User": { + "target": "com.amazonaws.datasync#SmbUser", + "traits": { + "smithy.api#documentation": "

Specifies a user that can mount and access the files, folders, and metadata in your SVM.

\n

For information about choosing a user with the right level of access for your transfer, see Using\n the SMB protocol.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your Amazon FSx for NetApp ONTAP file system's storage virtual machine (SVM). For more information, see\n Providing DataSync access to FSx for ONTAP file systems.

" + } + }, + "com.amazonaws.datasync#FsxUpdateSmbDomain": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 253 + }, + "smithy.api#pattern": "^([A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252})?$" } }, "com.amazonaws.datasync#FsxWindowsSubdirectory": { @@ -7750,7 +7824,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that DataSync uses to access your S3 bucket.

\n

For more information, see Accessing\n S3 buckets.

" + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that DataSync uses to access your S3 bucket.

\n

For more information, see Providing DataSync access to S3 buckets.

" } }, "com.amazonaws.datasync#S3ManifestConfig": { @@ -9174,7 +9248,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync.

" + "smithy.api#documentation": "

Modifies the following configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with Azure Blob Storage.

" } }, "com.amazonaws.datasync#UpdateLocationAzureBlobRequest": { @@ -9235,6 +9309,291 @@ "smithy.api#output": {} } }, + "com.amazonaws.datasync#UpdateLocationEfs": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationEfsRequest" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationEfsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon EFS transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with Amazon EFS.

" + } + }, + "com.amazonaws.datasync#UpdateLocationEfsRequest": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the Amazon EFS transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#EfsSubdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on if this is a source or destination location).

\n

By default, DataSync uses the root directory (or access point if you provide one by using\n AccessPointArn). You can also include subdirectories using forward slashes (for\n example, /path/to/folder).

" + } + }, + "AccessPointArn": { + "target": "com.amazonaws.datasync#UpdatedEfsAccessPointArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses\n to mount your Amazon EFS file system.

\n

For more information, see Accessing restricted Amazon EFS file systems.

" + } + }, + "FileSystemAccessRoleArn": { + "target": "com.amazonaws.datasync#UpdatedEfsIamRoleArn", + "traits": { + "smithy.api#documentation": "

Specifies an Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system.

\n

For information on creating this role, see Creating a DataSync IAM role for Amazon EFS file system access.

" + } + }, + "InTransitEncryption": { + "target": "com.amazonaws.datasync#EfsInTransitEncryption", + "traits": { + "smithy.api#documentation": "

Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2\n encryption when it transfers data to or from your Amazon EFS file system.

\n

If you specify an access point using AccessPointArn or an IAM\n role using FileSystemAccessRoleArn, you must set this parameter to\n TLS1_2.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationEfsResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxLustre": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationFsxLustreRequest" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationFsxLustreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon FSx for Lustre transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with FSx for Lustre.

" + } + }, + "com.amazonaws.datasync#UpdateLocationFsxLustreRequest": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the FSx for Lustre transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#SmbSubdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories.

\n

When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory (/).

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxLustreResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOntap": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationFsxOntapRequest" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationFsxOntapResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon FSx for NetApp ONTAP transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with FSx for ONTAP.

" + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOntapRequest": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the FSx for ONTAP transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Protocol": { + "target": "com.amazonaws.datasync#FsxUpdateProtocol", + "traits": { + "smithy.api#documentation": "

Specifies the data transfer protocol that DataSync uses to access your Amazon FSx file system.

" + } + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#FsxOntapSubdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a path to the file share in the storage virtual machine (SVM) where you want to transfer data to or from.

\n

You can specify a junction path (also known as a mount point), qtree path (for NFS file\n shares), or share name (for SMB file shares). For example, your mount path might be\n /vol1, /vol1/tree1, or /share1.

\n \n

Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide.

\n
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOntapResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOpenZfs": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationFsxOpenZfsRequest" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationFsxOpenZfsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon FSx for OpenZFS transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with FSx for OpenZFS.

\n \n

Request parameters related to SMB aren't supported with the\n UpdateLocationFsxOpenZfs operation.

\n
" + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOpenZfsRequest": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the FSx for OpenZFS transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Protocol": { + "target": "com.amazonaws.datasync#FsxProtocol" + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#SmbSubdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a subdirectory in the location's path that must begin with /fsx. DataSync uses this subdirectory to read or write data (depending on whether the file\n system is a source or destination location).

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxOpenZfsResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxWindows": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationFsxWindowsRequest" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationFsxWindowsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon FSx for Windows File Server transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with FSx for Windows File Server.

" + } + }, + "com.amazonaws.datasync#UpdateLocationFsxWindowsRequest": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the ARN of the FSx for Windows File Server transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#FsxWindowsSubdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a mount path for your file system using forward slashes. DataSync uses this subdirectory to read or write data (depending on whether the file\n system is a source or destination location).

" + } + }, + "Domain": { + "target": "com.amazonaws.datasync#FsxUpdateSmbDomain", + "traits": { + "smithy.api#documentation": "

Specifies the name of the Windows domain that your FSx for Windows File Server file system belongs to.

\n

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.

" + } + }, + "User": { + "target": "com.amazonaws.datasync#SmbUser", + "traits": { + "smithy.api#documentation": "

Specifies the user with the permissions to mount and access the files, folders, and file\n metadata in your FSx for Windows File Server file system.

\n

For information about choosing a user with the right level of access for your transfer, see required permissions for FSx for Windows File Server locations.

" + } + }, + "Password": { + "target": "com.amazonaws.datasync#SmbPassword", + "traits": { + "smithy.api#documentation": "

Specifies the password of the user with the permissions to mount and access the files,\n folders, and file metadata in your FSx for Windows File Server file system.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationFsxWindowsResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datasync#UpdateLocationHdfs": { "type": "operation", "input": { @@ -9252,7 +9611,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates some parameters of a previously created location for a Hadoop Distributed File\n System cluster.

" + "smithy.api#documentation": "

Modifies the following configuration parameters of the Hadoop Distributed File\n System (HDFS) transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with an HDFS cluster.

" } }, "com.amazonaws.datasync#UpdateLocationHdfsRequest": { @@ -9366,7 +9725,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies some configurations of the Network File System (NFS) transfer location that\n you're using with DataSync.

\n

For more information, see Configuring transfers to or from an\n NFS file server.

" + "smithy.api#documentation": "

Modifies the following configuration parameters of the Network File System (NFS) transfer location that you're using with DataSync.

\n

For more information, see Configuring transfers with an\n NFS file server.

" } }, "com.amazonaws.datasync#UpdateLocationNfsRequest": { @@ -9420,7 +9779,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates some parameters of an existing DataSync location for an object\n storage system.

" + "smithy.api#documentation": "

Modifies the following configuration parameters of the object storage transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with an object storage system.

" } }, "com.amazonaws.datasync#UpdateLocationObjectStorageRequest": { @@ -9487,6 +9846,63 @@ "smithy.api#output": {} } }, + "com.amazonaws.datasync#UpdateLocationS3": { + "type": "operation", + "input": { + "target": "com.amazonaws.datasync#UpdateLocationS3Request" + }, + "output": { + "target": "com.amazonaws.datasync#UpdateLocationS3Response" + }, + "errors": [ + { + "target": "com.amazonaws.datasync#InternalException" + }, + { + "target": "com.amazonaws.datasync#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies the following configuration parameters of the Amazon S3 transfer location that you're using with DataSync.

\n \n

Before you begin, make sure that you read the following topics:

\n \n
" + } + }, + "com.amazonaws.datasync#UpdateLocationS3Request": { + "type": "structure", + "members": { + "LocationArn": { + "target": "com.amazonaws.datasync#LocationArn", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the Amazon S3 transfer location that you're updating.

", + "smithy.api#required": {} + } + }, + "Subdirectory": { + "target": "com.amazonaws.datasync#S3Subdirectory", + "traits": { + "smithy.api#documentation": "

Specifies a prefix in the S3 bucket that DataSync reads from or writes to\n (depending on whether the bucket is a source or destination location).

\n \n

DataSync can't transfer objects with a prefix that begins with a slash\n (/) or includes //, /./, or\n /../ patterns. For example:

\n
    \n
  • /photos
  • photos//2006/January
  • photos/./2006/February
  • photos/../2006/March
\n
" + } + }, + "S3StorageClass": { + "target": "com.amazonaws.datasync#S3StorageClass", + "traits": { + "smithy.api#documentation": "

Specifies the storage class that you want your objects to use when Amazon S3 is a\n transfer destination.

\n

For buckets in Amazon Web Services Regions, the storage class defaults to\n STANDARD. For buckets on Outposts, the storage class defaults to\n OUTPOSTS.

\n

For more information, see Storage class\n considerations with Amazon S3 transfers.

" + } + }, + "S3Config": { + "target": "com.amazonaws.datasync#S3Config" + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datasync#UpdateLocationS3Response": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datasync#UpdateLocationSmb": { "type": "operation", "input": { @@ -9504,7 +9920,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates some of the parameters of a Server Message Block\n (SMB) file server location that you can use for DataSync transfers.

" + "smithy.api#documentation": "

Modifies the following configuration parameters of the Server Message Block\n (SMB) transfer location that you're using with DataSync.

\n

For more information, see Configuring DataSync transfers with an SMB file server.

" } }, "com.amazonaws.datasync#UpdateLocationSmbRequest": { @@ -9773,6 +10189,26 @@ "smithy.api#output": {} } }, + "com.amazonaws.datasync#UpdatedEfsAccessPointArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + }, + "smithy.api#pattern": "^(^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):elasticfilesystem:[a-z\\-0-9]+:[0-9]{12}:access-point/fsap-[0-9a-f]{8,40}$)|(^$)$" + } + }, + "com.amazonaws.datasync#UpdatedEfsIamRoleArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^(^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):iam::[0-9]{12}:role/.*$)|(^$)$" + } + }, "com.amazonaws.datasync#VerifyMode": { "type": "enum", "members": { diff --git a/models/docdb.json b/models/docdb.json index bad56a44ce..cd716554b9 100644 --- a/models/docdb.json +++ b/models/docdb.json @@ -1636,6 +1636,32 @@ "smithy.api#documentation": "

The configuration setting for the log types to be enabled for export to Amazon\n CloudWatch Logs for a specific instance or cluster.

\n

The EnableLogTypes and DisableLogTypes arrays determine\n which logs are exported (or not exported) to CloudWatch Logs. The values within these\n arrays depend on the engine that is being used.

" } }, + "com.amazonaws.docdb#ClusterMasterUserSecret": { + "type": "structure", + "members": { + "SecretArn": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret.

" + } + }, + "SecretStatus": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

The status of the secret.

\n

The possible status values include the following:

\n
  • creating - The secret is being created.
  • active - The secret is available for normal use and rotation.
  • rotating - The secret is being rotated.
  • impaired - The secret can be used to access database credentials, but it can't be rotated. A secret might have this status if, for example, permissions are changed so that Amazon DocumentDB can no longer access either the secret or the KMS key for the secret.

    When a secret has this status, you can correct the condition that caused the status. Alternatively, modify the instance to turn off automatic management of database credentials, and then modify the instance again to turn on automatic management of database credentials.
" + } + }, + "KmsKeyId": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier that is used to encrypt the secret.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the secret managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the master user password.

" + } + }, "com.amazonaws.docdb#CopyDBClusterParameterGroup": { "type": "operation", "input": { @@ -1998,6 +2024,18 @@ "traits": { "smithy.api#documentation": "

The storage type to associate with the DB cluster.

\n

For information on storage types for Amazon DocumentDB clusters, see \n Cluster storage configurations in the Amazon DocumentDB Developer Guide.

\n

Valid values for storage type - standard | iopt1\n

\n

Default value is standard \n

\n \n

When you create a DocumentDB DB cluster with the storage type set to iopt1, the storage type is returned\n in the response. The storage type isn't returned when you set it to standard.

\n
" } + }, + "ManageMasterUserPassword": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.

\n

Constraint: You can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified.

" + } + }, + "MasterUserSecretKmsKeyId": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.\n This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the DB cluster.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. \n To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

\n

If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. \n If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

\n

There is a default KMS key for your Amazon Web Services account. \n Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

" + } } }, "traits": { @@ -2792,6 +2830,12 @@ "traits": { "smithy.api#documentation": "

Storage type associated with your cluster

\n

For information on storage types for Amazon DocumentDB clusters, see \n Cluster storage configurations in the Amazon DocumentDB Developer Guide.

\n

Valid values for storage type - standard | iopt1\n

\n

Default value is standard \n

" } + }, + "MasterUserSecret": { + "target": "com.amazonaws.docdb#ClusterMasterUserSecret", + "traits": { + "smithy.api#documentation": "

The secret managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the master user password.

" + } } }, "traits": { @@ -4791,6 +4835,21 @@ "smithy.api#suppress": [ "WaitableTraitInvalidErrorType" ], + "smithy.test#smokeTests": [ + { + "id": "DescribeDBInstancesFailure", + "params": { + "DBInstanceIdentifier": "fake-id" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ], "smithy.waiters#waitable": { "DBInstanceAvailable": { "acceptors": [ @@ -6597,6 +6656,24 @@ "traits": { "smithy.api#documentation": "

The storage type to associate with the DB cluster.

\n

For information on storage types for Amazon DocumentDB clusters, see \n Cluster storage configurations in the Amazon DocumentDB Developer Guide.

\n

Valid values for storage type - standard | iopt1\n

\n

Default value is standard \n

" } + }, + "ManageMasterUserPassword": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.\n If the cluster doesn't manage the master user password with Amazon Web Services Secrets Manager, you can turn on this management. \n In this case, you can't specify MasterUserPassword.\n If the cluster already manages the master user password with Amazon Web Services Secrets Manager, and you specify that the master user password is not managed with Amazon Web Services Secrets Manager, then you must specify MasterUserPassword. \n In this case, Amazon DocumentDB deletes the secret and uses the new password for the master user specified by MasterUserPassword.

" + } + }, + "MasterUserSecretKmsKeyId": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

\n

This setting is valid only if both of the following conditions are met:

\n
    \n
  • \n

    The cluster doesn't manage the master user password in Amazon Web Services Secrets Manager. \n If the cluster already manages the master user password in Amazon Web Services Secrets Manager, you can't change the KMS key that is used to encrypt the secret.

    \n
  • \n
  • \n

    You are enabling ManageMasterUserPassword to manage the master user password in Amazon Web Services Secrets Manager. \n If you are turning on ManageMasterUserPassword and don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. \n If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

    \n
  • \n
\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. \n To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

\n

There is a default KMS key for your Amazon Web Services account. \n Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

" + } + }, + "RotateMasterUserPassword": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to rotate the secret managed by Amazon Web Services Secrets Manager for the master user password.

\n

This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the cluster. \n The secret value contains the updated password.

\n

Constraint: You must apply the change immediately when rotating the master user password.

" + } } }, "traits": { diff --git a/models/ecr-public.json b/models/ecr-public.json index c0d80f9bc1..1144dedbd1 100644 --- a/models/ecr-public.json +++ b/models/ecr-public.json @@ -2901,7 +2901,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2944,7 +2943,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -2957,7 +2957,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2971,7 +2970,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2994,7 +2992,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -3029,7 +3026,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -3040,14 +3036,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -3061,14 +3059,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -3077,11 +3073,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -3092,14 +3088,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -3113,7 +3111,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -3133,7 +3130,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -3144,14 +3140,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -3162,9 +3160,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/models/ecr.json b/models/ecr.json 
index fe5e547ed2..d0ae29a1b6 100644 --- a/models/ecr.json +++ b/models/ecr.json @@ -3499,7 +3499,7 @@ "encryptionType": { "target": "com.amazonaws.ecr#EncryptionType", "traits": { - "smithy.api#documentation": "

The encryption type to use.

\n

If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created.

\n

If you use the KMS_DSSE encryption type, the contents of the repository\n will be encrypted with two layers of encryption using server-side encryption with the\n KMS Management Service key stored in KMS. Similar to the KMS encryption type, you\n can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS\n key, which you've already created.

\n

If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES256 encryption algorithm.

\n

For more information, see Amazon ECR encryption at\n rest in the Amazon Elastic Container Registry User Guide.

", + "smithy.api#documentation": "

The encryption type to use.

\n

If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created.

\n

If you use the KMS_DSSE encryption type, the contents of the repository\n will be encrypted with two layers of encryption using server-side encryption with the\n KMS Management Service key stored in KMS. Similar to the KMS encryption\n type, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your\n own KMS key, which you've already created.

\n

If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES256 encryption algorithm.

\n

For more information, see Amazon ECR encryption at\n rest in the Amazon Elastic Container Registry User Guide.

", "smithy.api#required": {} } }, @@ -3784,7 +3784,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the basic scan type version name.

" + "smithy.api#documentation": "

Retrieves the account setting value for the specified setting name.

" } }, "com.amazonaws.ecr#GetAccountSettingRequest": { @@ -3793,7 +3793,7 @@ "name": { "target": "com.amazonaws.ecr#AccountSettingName", "traits": { - "smithy.api#documentation": "

Basic scan type version name.

", + "smithy.api#documentation": "

The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or\n REGISTRY_POLICY_SCOPE.

", "smithy.api#required": {} } } @@ -3808,13 +3808,13 @@ "name": { "target": "com.amazonaws.ecr#AccountSettingName", "traits": { - "smithy.api#documentation": "

Retrieves the basic scan type version name.

" + "smithy.api#documentation": "

Retrieves the name of the account setting.

" } }, "value": { "target": "com.amazonaws.ecr#AccountSettingName", "traits": { - "smithy.api#documentation": "

Retrieves the value that specifies what basic scan type is being used:\n AWS_NATIVE or CLAIR.

" + "smithy.api#documentation": "

The setting value for the setting name. The following are valid values for the basic scan\n type being used: AWS_NATIVE or CLAIR. The following are valid\n values for the registry policy scope being used: V1 or\n V2.

" } } }, @@ -5995,7 +5995,7 @@ } ], "traits": { - "smithy.api#documentation": "

Allows you to change the basic scan type version by setting the name\n parameter to either CLAIR to AWS_NATIVE.

" + "smithy.api#documentation": "

Allows you to change the basic scan type version or registry policy scope.

" } }, "com.amazonaws.ecr#PutAccountSettingRequest": { @@ -6004,14 +6004,14 @@ "name": { "target": "com.amazonaws.ecr#AccountSettingName", "traits": { - "smithy.api#documentation": "

Basic scan type version name.

", + "smithy.api#documentation": "

The name of the account setting, such as BASIC_SCAN_TYPE_VERSION or\n REGISTRY_POLICY_SCOPE.

", "smithy.api#required": {} } }, "value": { "target": "com.amazonaws.ecr#AccountSettingValue", "traits": { - "smithy.api#documentation": "

Setting value that determines what basic scan type is being used:\n AWS_NATIVE or CLAIR.

", + "smithy.api#documentation": "

Setting value that is specified. The following are valid values for the basic scan\n type being used: AWS_NATIVE or CLAIR. The following are valid\n values for the registry policy scope being used: V1 or\n V2.

", "smithy.api#required": {} } } @@ -6026,13 +6026,13 @@ "name": { "target": "com.amazonaws.ecr#AccountSettingName", "traits": { - "smithy.api#documentation": "

Retrieves the the basic scan type version name.

" + "smithy.api#documentation": "

Retrieves the name of the account setting.

" } }, "value": { "target": "com.amazonaws.ecr#AccountSettingValue", "traits": { - "smithy.api#documentation": "

Retrieves the basic scan type value, either AWS_NATIVE or\n -.

" + "smithy.api#documentation": "

Retrieves the value of the specified account setting.

" } } }, diff --git a/models/eks.json b/models/eks.json index d4cc65bd5e..edca7c041c 100644 --- a/models/eks.json +++ b/models/eks.json @@ -203,6 +203,9 @@ { "target": "com.amazonaws.eks#DescribeCluster" }, + { + "target": "com.amazonaws.eks#DescribeClusterVersions" + }, { "target": "com.amazonaws.eks#DescribeEksAnywhereSubscription" }, @@ -1616,6 +1619,32 @@ "smithy.api#documentation": "

An Amazon EKS add-on. For more information, see Amazon EKS add-ons in\n the Amazon EKS User Guide.

" } }, + "com.amazonaws.eks#AddonCompatibilityDetail": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

The name of the Amazon EKS add-on.

" + } + }, + "compatibleVersions": { + "target": "com.amazonaws.eks#StringList", + "traits": { + "smithy.api#documentation": "

A list of compatible add-on versions.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains compatibility information for an Amazon EKS add-on.

" + } + }, + "com.amazonaws.eks#AddonCompatibilityDetails": { + "type": "list", + "member": { + "target": "com.amazonaws.eks#AddonCompatibilityDetail" + } + }, "com.amazonaws.eks#AddonHealth": { "type": "structure", "members": { @@ -2837,6 +2866,98 @@ } } }, + "com.amazonaws.eks#ClusterVersionInformation": { + "type": "structure", + "members": { + "clusterVersion": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

The Kubernetes version for the cluster.

" + } + }, + "clusterType": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

The type of cluster this version is for.

" + } + }, + "defaultPlatformVersion": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

Default platform version for this Kubernetes version.

" + } + }, + "defaultVersion": { + "target": "com.amazonaws.eks#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Indicates if this is a default version.

" + } + }, + "releaseDate": { + "target": "com.amazonaws.eks#Timestamp", + "traits": { + "smithy.api#documentation": "

The release date of this cluster version.

" + } + }, + "endOfStandardSupportDate": { + "target": "com.amazonaws.eks#Timestamp", + "traits": { + "smithy.api#documentation": "

Date when standard support ends for this version.

" + } + }, + "endOfExtendedSupportDate": { + "target": "com.amazonaws.eks#Timestamp", + "traits": { + "smithy.api#documentation": "

Date when extended support ends for this version.

" + } + }, + "status": { + "target": "com.amazonaws.eks#ClusterVersionStatus", + "traits": { + "smithy.api#documentation": "

Current status of this cluster version.

" + } + }, + "kubernetesPatchVersion": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

The patch version of Kubernetes for this cluster version.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about a specific EKS cluster version.

" + } + }, + "com.amazonaws.eks#ClusterVersionList": { + "type": "list", + "member": { + "target": "com.amazonaws.eks#ClusterVersionInformation" + } + }, + "com.amazonaws.eks#ClusterVersionStatus": { + "type": "enum", + "members": { + "unsupported": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "unsupported" + } + }, + "standard_support": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "standard-support" + } + }, + "extended_support": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "extended-support" + } + } + } + }, "com.amazonaws.eks#Compatibilities": { "type": "list", "member": { @@ -5148,6 +5269,126 @@ "smithy.api#output": {} } }, + "com.amazonaws.eks#DescribeClusterVersionMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.eks#DescribeClusterVersions": { + "type": "operation", + "input": { + "target": "com.amazonaws.eks#DescribeClusterVersionsRequest" + }, + "output": { + "target": "com.amazonaws.eks#DescribeClusterVersionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.eks#InvalidParameterException" + }, + { + "target": "com.amazonaws.eks#InvalidRequestException" + }, + { + "target": "com.amazonaws.eks#ServerException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists available Kubernetes versions for Amazon EKS clusters.

", + "smithy.api#http": { + "method": "GET", + "uri": "/cluster-versions", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "clusterVersions", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.eks#DescribeClusterVersionsRequest": { + "type": "structure", + "members": { + "clusterType": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

The type of cluster to filter versions by.

", + "smithy.api#httpQuery": "clusterType" + } + }, + "maxResults": { + "target": "com.amazonaws.eks#DescribeClusterVersionMaxResults", + "traits": { + "smithy.api#documentation": "

Maximum number of results to return.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

Pagination token for the next set of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "defaultOnly": { + "target": "com.amazonaws.eks#BoxedBoolean", + "traits": { + "smithy.api#documentation": "

Filter to show only default versions.

", + "smithy.api#httpQuery": "defaultOnly" + } + }, + "includeAll": { + "target": "com.amazonaws.eks#BoxedBoolean", + "traits": { + "smithy.api#documentation": "

Include all available versions in the response.

", + "smithy.api#httpQuery": "includeAll" + } + }, + "clusterVersions": { + "target": "com.amazonaws.eks#StringList", + "traits": { + "smithy.api#documentation": "

List of specific cluster versions to describe.

", + "smithy.api#httpQuery": "clusterVersions" + } + }, + "status": { + "target": "com.amazonaws.eks#ClusterVersionStatus", + "traits": { + "smithy.api#documentation": "

Filter versions by their current status.

", + "smithy.api#httpQuery": "status" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.eks#DescribeClusterVersionsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

Pagination token for the next set of results.

" + } + }, + "clusterVersions": { + "target": "com.amazonaws.eks#ClusterVersionList", + "traits": { + "smithy.api#documentation": "

List of cluster version information objects.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.eks#DescribeEksAnywhereSubscription": { "type": "operation", "input": { @@ -6629,6 +6870,12 @@ "traits": { "smithy.api#documentation": "

The summary information about deprecated resource usage for an insight check in the\n UPGRADE_READINESS category.

" } + }, + "addonCompatibilityDetails": { + "target": "com.amazonaws.eks#AddonCompatibilityDetails", + "traits": { + "smithy.api#documentation": "

A list of AddonCompatibilityDetail objects for Amazon EKS add-ons.

" + } } }, "traits": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 1c7d34cccf..6e5a6c7d58 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -675,85 +675,283 @@ "credentialScope" : { "region" : "af-south-1" }, - "hostname" : "api.ecr.af-south-1.amazonaws.com" + "hostname" : "api.ecr.af-south-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ap-east-1" : { "credentialScope" : { "region" : "ap-east-1" }, - "hostname" : "api.ecr.ap-east-1.amazonaws.com" + "hostname" : "api.ecr.ap-east-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-1" : { "credentialScope" : { "region" : "ap-northeast-1" }, - "hostname" : "api.ecr.ap-northeast-1.amazonaws.com" + "hostname" : "api.ecr.ap-northeast-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-2" : { "credentialScope" : { "region" : "ap-northeast-2" }, - "hostname" : "api.ecr.ap-northeast-2.amazonaws.com" + "hostname" : "api.ecr.ap-northeast-2.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-3" : { "credentialScope" : { "region" : "ap-northeast-3" }, - "hostname" : "api.ecr.ap-northeast-3.amazonaws.com" + "hostname" : "api.ecr.ap-northeast-3.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ap-south-1" : { "credentialScope" : { "region" : "ap-south-1" }, - "hostname" : "api.ecr.ap-south-1.amazonaws.com" + "hostname" : "api.ecr.ap-south-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ap-south-2" : { "credentialScope" : { "region" : "ap-south-2" }, - "hostname" : "api.ecr.ap-south-2.amazonaws.com" + "hostname" : "api.ecr.ap-south-2.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-1" : { "credentialScope" : { "region" : "ap-southeast-1" }, - "hostname" : "api.ecr.ap-southeast-1.amazonaws.com" + "hostname" : 
"api.ecr.ap-southeast-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-2" : { "credentialScope" : { "region" : "ap-southeast-2" }, - "hostname" : "api.ecr.ap-southeast-2.amazonaws.com" + "hostname" : "api.ecr.ap-southeast-2.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-3" : { "credentialScope" : { "region" : "ap-southeast-3" }, - "hostname" : "api.ecr.ap-southeast-3.amazonaws.com" + "hostname" : "api.ecr.ap-southeast-3.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-4" : { "credentialScope" : { "region" : "ap-southeast-4" }, - "hostname" : "api.ecr.ap-southeast-4.amazonaws.com" + "hostname" : "api.ecr.ap-southeast-4.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-5" : { "credentialScope" : { "region" : "ap-southeast-5" }, - "hostname" : "api.ecr.ap-southeast-5.amazonaws.com" + "hostname" : "api.ecr.ap-southeast-5.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" }, - "hostname" : "api.ecr.ca-central-1.amazonaws.com" + "hostname" : "api.ecr.ca-central-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "ca-west-1" : { "credentialScope" : { "region" : "ca-west-1" }, - "hostname" : "api.ecr.ca-west-1.amazonaws.com" + "hostname" : "api.ecr.ca-west-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-af-south-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ap-east-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ap-northeast-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ap-northeast-2" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ap-northeast-3" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ 
"dualstack" ] + } ] + }, + "dkr-ap-south-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ap-south-2" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ap-southeast-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ap-southeast-2" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ap-southeast-3" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ap-southeast-4" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ap-southeast-5" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ca-central-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-ca-west-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-eu-central-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-eu-central-2" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-eu-north-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-eu-south-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-eu-south-2" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-eu-west-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-eu-west-2" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-eu-west-3" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-il-central-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-me-central-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + 
} ] + }, + "dkr-me-south-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-sa-east-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "dkr-us-east-1" : { "credentialScope" : { @@ -761,6 +959,10 @@ }, "deprecated" : true, "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "ecr-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -771,6 +973,10 @@ }, "deprecated" : true, "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "ecr-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] } ] @@ -781,6 +987,10 @@ }, "deprecated" : true, "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "ecr-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -791,6 +1001,10 @@ }, "deprecated" : true, "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "ecr-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] } ] @@ -799,49 +1013,73 @@ "credentialScope" : { "region" : "eu-central-1" }, - "hostname" : "api.ecr.eu-central-1.amazonaws.com" + "hostname" : "api.ecr.eu-central-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "eu-central-2" : { "credentialScope" : { "region" : "eu-central-2" }, - "hostname" : "api.ecr.eu-central-2.amazonaws.com" + "hostname" : "api.ecr.eu-central-2.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "eu-north-1" : { "credentialScope" : { "region" : "eu-north-1" }, - "hostname" : "api.ecr.eu-north-1.amazonaws.com" + "hostname" : "api.ecr.eu-north-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "eu-south-1" : { "credentialScope" : { "region" : "eu-south-1" }, - "hostname" : "api.ecr.eu-south-1.amazonaws.com" + "hostname" : "api.ecr.eu-south-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + 
} ] }, "eu-south-2" : { "credentialScope" : { "region" : "eu-south-2" }, - "hostname" : "api.ecr.eu-south-2.amazonaws.com" + "hostname" : "api.ecr.eu-south-2.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "eu-west-1" : { "credentialScope" : { "region" : "eu-west-1" }, - "hostname" : "api.ecr.eu-west-1.amazonaws.com" + "hostname" : "api.ecr.eu-west-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "eu-west-2" : { "credentialScope" : { "region" : "eu-west-2" }, - "hostname" : "api.ecr.eu-west-2.amazonaws.com" + "hostname" : "api.ecr.eu-west-2.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "eu-west-3" : { "credentialScope" : { "region" : "eu-west-3" }, - "hostname" : "api.ecr.eu-west-3.amazonaws.com" + "hostname" : "api.ecr.eu-west-3.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "fips-dkr-us-east-1" : { "credentialScope" : { @@ -903,25 +1141,37 @@ "credentialScope" : { "region" : "il-central-1" }, - "hostname" : "api.ecr.il-central-1.amazonaws.com" + "hostname" : "api.ecr.il-central-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "me-central-1" : { "credentialScope" : { "region" : "me-central-1" }, - "hostname" : "api.ecr.me-central-1.amazonaws.com" + "hostname" : "api.ecr.me-central-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "me-south-1" : { "credentialScope" : { "region" : "me-south-1" }, - "hostname" : "api.ecr.me-south-1.amazonaws.com" + "hostname" : "api.ecr.me-south-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "sa-east-1" : { "credentialScope" : { "region" : "sa-east-1" }, - "hostname" : "api.ecr.sa-east-1.amazonaws.com" + "hostname" : "api.ecr.sa-east-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "us-east-1" : { "credentialScope" : { @@ -929,6 +1179,10 @@ }, "hostname" : "api.ecr.us-east-1.amazonaws.com", "variants" : [ { + "tags" : [ "dualstack" ] + }, { + 
"tags" : [ "dualstack", "fips" ] + }, { "hostname" : "ecr-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -939,6 +1193,10 @@ }, "hostname" : "api.ecr.us-east-2.amazonaws.com", "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "ecr-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] } ] @@ -949,6 +1207,10 @@ }, "hostname" : "api.ecr.us-west-1.amazonaws.com", "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "ecr-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -959,6 +1221,10 @@ }, "hostname" : "api.ecr.us-west-2.amazonaws.com", "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "ecr-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] } ] @@ -971,7 +1237,10 @@ "credentialScope" : { "region" : "us-east-1" }, - "hostname" : "api.ecr-public.us-east-1.amazonaws.com" + "hostname" : "api.ecr-public.us-east-1.amazonaws.com", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "us-west-2" : { "credentialScope" : { @@ -1267,28 +1536,95 @@ "api.tunneling.iot" : { "defaults" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", "tags" : [ "fips" ] } ] }, "endpoints" : { - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-northeast-2.api.aws", 
+ "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] } ] }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -1324,29 +1660,68 @@ "deprecated" : true, "hostname" : "api.tunneling.iot-fips.us-west-2.amazonaws.com" }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "me-central-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : 
"api.iot-tunneling.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-east-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] } ] }, "us-east-2" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-east-2.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] } ] }, "us-west-1" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-west-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] } ] }, "us-west-2" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-west-2.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] } ] @@ -4741,27 +5116,132 @@ }, "cognito-idp" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : 
{ }, + "af-south-1" : { + "variants" : [ { + "hostname" : "cognito-idp.af-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-northeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-northeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-northeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-south-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-southeast-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-southeast-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-southeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "cognito-idp.ap-southeast-4.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ca-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "cognito-idp.ca-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-central-2.amazonaws.com", + 
"tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-north-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-south-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-west-2.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "cognito-idp.eu-west-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -4790,32 +5270,76 @@ "deprecated" : true, "hostname" : "cognito-idp-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "cognito-idp.il-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "cognito-idp.me-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "cognito-idp.me-south-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "cognito-idp.sa-east-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { + "hostname" : "cognito-idp-fips.us-east-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-idp-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-idp.us-east-1.amazonaws.com", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { + "hostname" : 
"cognito-idp-fips.us-east-2.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-idp-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-idp.us-east-2.amazonaws.com", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { + "hostname" : "cognito-idp-fips.us-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-idp-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-idp.us-west-1.amazonaws.com", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { + "hostname" : "cognito-idp-fips.us-west-2.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-idp-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-idp.us-west-2.amazonaws.com", + "tags" : [ "dualstack" ] } ] } } @@ -5780,38 +6304,150 @@ }, "datasync" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "datasync.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "datasync.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "datasync.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "datasync.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "datasync.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "datasync.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + 
"variants" : [ { + "hostname" : "datasync.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "datasync-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.ca-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" : [ { "hostname" : "datasync-fips.ca-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "datasync.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "datasync.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "datasync.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "datasync.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "datasync.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + 
"hostname" : "datasync.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "datasync.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "datasync.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -5854,32 +6490,76 @@ "deprecated" : true, "hostname" : "datasync-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "datasync.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "datasync.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "datasync.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "datasync.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "datasync-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "datasync-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "datasync-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-west-1.api.aws", + "tags" : [ 
"dualstack", "fips" ] + }, { + "hostname" : "datasync.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "datasync-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -9910,109 +10590,219 @@ "tags" : [ "fips" ] } ] }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "fips-ca-central-1" : { - "credentialScope" : { - "region" : "ca-central-1" - }, - "deprecated" : true, - "hostname" : "glacier-fips.ca-central-1.amazonaws.com" - }, - "fips-us-east-1" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "deprecated" : true, - "hostname" : "glacier-fips.us-east-1.amazonaws.com" + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "glacier-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "glacier-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "glacier-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "glacier-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "glacier-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "glacier-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] 
+ }, + "us-east-2" : { + "variants" : [ { + "hostname" : "glacier-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "glacier-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "glacier-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "globalaccelerator" : { + "endpoints" : { + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "globalaccelerator-fips.us-west-2.amazonaws.com" + } + } + }, + "glue" : { + "endpoints" : { + "af-south-1" : { + "variants" : [ { + "hostname" : "glue.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "glue.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "glue.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "glue.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "glue.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "glue.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "glue.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "glue.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "glue.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "glue.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "glue.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + 
"variants" : [ { + "hostname" : "glue.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "glue.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "glue.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "glue.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, - "fips-us-east-2" : { - "credentialScope" : { - "region" : "us-east-2" - }, - "deprecated" : true, - "hostname" : "glacier-fips.us-east-2.amazonaws.com" + "eu-central-2" : { + "variants" : [ { + "hostname" : "glue.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, - "fips-us-west-1" : { - "credentialScope" : { - "region" : "us-west-1" - }, - "deprecated" : true, - "hostname" : "glacier-fips.us-west-1.amazonaws.com" + "eu-north-1" : { + "variants" : [ { + "hostname" : "glue.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, - "fips-us-west-2" : { - "credentialScope" : { - "region" : "us-west-2" - }, - "deprecated" : true, - "hostname" : "glacier-fips.us-west-2.amazonaws.com" + "eu-south-1" : { + "variants" : [ { + "hostname" : "glue.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { + "eu-south-2" : { "variants" : [ { - "hostname" : "glacier-fips.us-east-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "glue.eu-south-2.api.aws", + "tags" : [ "dualstack" ] } ] }, - "us-east-2" : { + "eu-west-1" : { "variants" : [ { - "hostname" : "glacier-fips.us-east-2.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "glue.eu-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "us-west-1" : { + "eu-west-2" : { "variants" : [ { - "hostname" : "glacier-fips.us-west-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "glue.eu-west-2.api.aws", + "tags" : [ "dualstack" ] } ] }, - "us-west-2" : { + "eu-west-3" : { "variants" : [ { - "hostname" 
: "glacier-fips.us-west-2.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "glue.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] - } - } - }, - "globalaccelerator" : { - "endpoints" : { - "fips-us-west-2" : { - "credentialScope" : { - "region" : "us-west-2" - }, - "hostname" : "globalaccelerator-fips.us-west-2.amazonaws.com" - } - } - }, - "glue" : { - "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -10041,32 +10831,76 @@ "deprecated" : true, "hostname" : "glue-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "glue.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "glue.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "glue.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "glue.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "glue-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "glue-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "glue.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "glue-fips.us-east-2.amazonaws.com", "tags" : 
[ "fips" ] + }, { + "hostname" : "glue-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "glue.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "glue-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "glue-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "glue.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "glue-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "glue-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "glue.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -11619,6 +12453,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "kafka-fips.ca-central-1.amazonaws.com", @@ -13579,21 +14414,96 @@ }, "macie2" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "macie2.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "macie2.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "macie2.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "macie2.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "macie2.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + 
"variants" : [ { + "hostname" : "macie2.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "macie2.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "macie2.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "macie2.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "macie2.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "macie2.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "macie2.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "macie2.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "macie2.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "macie2.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -13622,31 +14532,70 @@ "deprecated" : true, "hostname" : "macie2-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "macie2.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "macie2.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "macie2.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "macie2-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "macie2-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + 
"hostname" : "macie2.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "macie2-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "macie2-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "macie2.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "macie2-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "macie2-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "macie2.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "macie2-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "macie2-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "macie2.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -13737,25 +14686,96 @@ }, "mediaconvert" : { "endpoints" : { - "af-south-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-4" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "mediaconvert.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + 
"variants" : [ { + "hostname" : "mediaconvert.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "mediaconvert.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "mediaconvert-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "mediaconvert.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "mediaconvert.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "mediaconvert.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "mediaconvert.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "mediaconvert.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "mediaconvert.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -13791,30 +14811,64 @@ "deprecated" : true, "hostname" : "mediaconvert-fips.us-west-2.amazonaws.com" }, - "me-central-1" : { }, - "sa-east-1" : { }, + "me-central-1" : { + "variants" : [ { + "hostname" : "mediaconvert.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "mediaconvert.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "mediaconvert-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : 
"mediaconvert.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "mediaconvert-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "mediaconvert.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "mediaconvert-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "mediaconvert.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "mediaconvert-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "mediaconvert.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -16504,70 +17558,182 @@ "deprecated" : true, "hostname" : "ram-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { + "il-central-1" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "ram-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "ram-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "ram-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "ram-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, + "rbin" : { + "endpoints" : { + "af-south-1" : { + "variants" : [ { + "hostname" : "rbin.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "rbin.ap-east-1.api.aws", + "tags" : [ 
"dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "rbin.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "rbin.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "rbin.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "rbin.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "rbin.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "rbin.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "rbin.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "rbin.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "rbin.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "rbin.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "rbin-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "rbin-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "rbin.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + 
"variants" : [ { + "hostname" : "rbin.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { "variants" : [ { - "hostname" : "ram-fips.us-east-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "rbin.eu-north-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "us-east-2" : { + "eu-south-1" : { "variants" : [ { - "hostname" : "ram-fips.us-east-2.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "rbin.eu-south-1.api.aws", + "tags" : [ "dualstack" ] } ] }, - "us-west-1" : { + "eu-south-2" : { "variants" : [ { - "hostname" : "ram-fips.us-west-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "rbin.eu-south-2.api.aws", + "tags" : [ "dualstack" ] } ] }, - "us-west-2" : { + "eu-west-1" : { "variants" : [ { - "hostname" : "ram-fips.us-west-2.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "rbin.eu-west-1.api.aws", + "tags" : [ "dualstack" ] } ] - } - } - }, - "rbin" : { - "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, - "ca-central-1" : { + }, + "eu-west-2" : { "variants" : [ { - "hostname" : "rbin-fips.ca-central-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "rbin.eu-west-2.api.aws", + "tags" : [ "dualstack" ] } ] }, - "ca-west-1" : { + "eu-west-3" : { "variants" : [ { - "hostname" : "rbin-fips.ca-west-1.amazonaws.com", - "tags" : [ "fips" ] + "hostname" : "rbin.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -16610,32 +17776,76 @@ "deprecated" : true, "hostname" : "rbin-fips.us-west-2.amazonaws.com" }, - 
"il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "rbin.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "rbin.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "rbin.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "rbin.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "rbin-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "rbin-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "rbin-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "rbin-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -23997,13 +25207,31 @@ "credentialScope" : { "region" : "cn-north-1" }, - "hostname" : "api.ecr.cn-north-1.amazonaws.com.cn" + "hostname" : "api.ecr.cn-north-1.amazonaws.com.cn", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] }, "cn-northwest-1" : { "credentialScope" : { "region" : "cn-northwest-1" }, - "hostname" : 
"api.ecr.cn-northwest-1.amazonaws.com.cn" + "hostname" : "api.ecr.cn-northwest-1.amazonaws.com.cn", + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-cn-north-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "dkr-cn-northwest-1" : { + "deprecated" : true, + "variants" : [ { + "tags" : [ "dualstack" ] + } ] } } }, @@ -24024,9 +25252,31 @@ } }, "api.tunneling.iot" : { + "defaults" : { + "variants" : [ { + "hostname" : "api.iot-tunneling-fips.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + }, { + "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "api.iot-tunneling.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "apigateway" : { @@ -24309,8 +25559,18 @@ }, "datasync" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "datasync.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "datasync.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "datazone" : { @@ -24345,8 +25605,18 @@ }, "dlm" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "dlm.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "dlm.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "dms" : { @@ -24601,8 +25871,18 @@ }, "glue" : 
{ "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "glue.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "glue.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "greengrass" : { @@ -24836,10 +26116,10 @@ "mediaconvert" : { "endpoints" : { "cn-northwest-1" : { - "credentialScope" : { - "region" : "cn-northwest-1" - }, - "hostname" : "mediaconvert.cn-northwest-1.amazonaws.com.cn" + "variants" : [ { + "hostname" : "mediaconvert.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] } } }, @@ -25030,8 +26310,18 @@ }, "rbin" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "rbin.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "rbin.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "rds" : { @@ -25692,6 +26982,10 @@ }, "deprecated" : true, "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "ecr-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -25702,6 +26996,10 @@ }, "deprecated" : true, "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "ecr-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -25740,6 +27038,10 @@ }, "hostname" : "api.ecr.us-gov-east-1.amazonaws.com", "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "ecr-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -25750,6 +27052,10 @@ }, "hostname" : "api.ecr.us-gov-west-1.amazonaws.com", "variants" : [ { + "tags" : [ "dualstack" ] + }, { + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : 
"ecr-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -25800,6 +27106,12 @@ "api.tunneling.iot" : { "defaults" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.{region}.{dnsSuffix}", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.{region}.{dnsSuffix}", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.{region}.{dnsSuffix}", "tags" : [ "fips" ] } ] @@ -25821,12 +27133,24 @@ }, "us-gov-east-1" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] } ] }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "api.iot-tunneling-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "api.iot-tunneling.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + }, { "hostname" : "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -26549,8 +27873,14 @@ }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "cognito-idp-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "cognito-idp-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "cognito-idp.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack" ] } ] } } @@ -26830,12 +28160,24 @@ "variants" : [ { "hostname" : "datasync-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "datasync-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : 
"datasync.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -28609,6 +29951,12 @@ "variants" : [ { "hostname" : "mediaconvert.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "mediaconvert.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] + }, { + "hostname" : "mediaconvert.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] } ] } } @@ -29163,12 +30511,24 @@ "variants" : [ { "hostname" : "rbin-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "rbin-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "rbin-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "rbin.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -31389,6 +32749,18 @@ "us-iso-east-1" : { } } }, + "organizations" : { + "endpoints" : { + "aws-iso-global" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "organizations.us-iso-east-1.c2s.ic.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-global" + }, "outposts" : { "endpoints" : { "us-iso-east-1" : { } diff --git a/models/glue.json b/models/glue.json index 37d25bc7bd..26d394afec 100644 --- a/models/glue.json +++ b/models/glue.json @@ -18982,7 +18982,13 @@ "target": "com.amazonaws.glue#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

When specified as true, iterates through the account and returns all catalog resources (including top-level resources and child resources)

" + "smithy.api#documentation": "

Whether to list all catalogs across the catalog hierarchy, starting from the ParentCatalogId. Defaults to false . When true, all catalog objects in the ParentCatalogID hierarchy are enumerated in the response.

" + } + }, + "IncludeRoot": { + "target": "com.amazonaws.glue#NullableBoolean", + "traits": { + "smithy.api#documentation": "

Whether to list the default catalog in the account and region in the response. Defaults to false. When true and ParentCatalogId = NULL | Amazon Web Services Account ID, all catalogs and the default catalog are enumerated in the response.

\n

When the ParentCatalogId is not equal to null, and this attribute is passed as false or true, an InvalidInputException is thrown.

" } } }, @@ -27414,7 +27420,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

\n

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days.

" + "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

\n

Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.

\n

When the value is left blank, the timeout is defaulted to 2880 minutes.

\n

Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.

" } }, "MaxCapacity": { @@ -38513,7 +38519,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

\n

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days.

" + "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

\n

Jobs must have timeout values less than 7 days or 10080 minutes. Otherwise, the jobs will throw an exception.

\n

When the value is left blank, the timeout is defaulted to 2880 minutes.

\n

Any existing Glue jobs that had a timeout value greater than 7 days will be defaulted to 7 days. For instance if you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.

" } }, "MaxCapacity": { diff --git a/models/iot.json b/models/iot.json index 5c67493490..a9c2ad09c8 100644 --- a/models/iot.json +++ b/models/iot.json @@ -468,6 +468,9 @@ { "target": "com.amazonaws.iot#GetStatistics" }, + { + "target": "com.amazonaws.iot#GetThingConnectivityData" + }, { "target": "com.amazonaws.iot#GetTopicRule" }, @@ -6890,6 +6893,17 @@ "smithy.api#pattern": "^[a-zA-Z0-9:.]+$" } }, + "com.amazonaws.iot#ConnectivityApiThingName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[a-zA-Z0-9:_-]+$", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.iot#ConnectivityTimestamp": { "type": "long" }, @@ -7492,7 +7506,7 @@ "roleArn": { "target": "com.amazonaws.iot#RoleArn", "traits": { - "smithy.api#documentation": "

The IAM role that allows access to create the command.

" + "smithy.api#documentation": "

The IAM role that you must provide when using the AWS-IoT-FleetWise namespace.\n The role grants IoT Device Management the permission to access IoT FleetWise resources \n for generating the payload for the command. This field is not required when you use the\n AWS-IoT namespace.

" } }, "tags": { @@ -16802,6 +16816,95 @@ "com.amazonaws.iot#DisconnectReason": { "type": "string" }, + "com.amazonaws.iot#DisconnectReasonValue": { + "type": "enum", + "members": { + "AUTH_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUTH_ERROR" + } + }, + "CLIENT_INITIATED_DISCONNECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLIENT_INITIATED_DISCONNECT" + } + }, + "CLIENT_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLIENT_ERROR" + } + }, + "CONNECTION_LOST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONNECTION_LOST" + } + }, + "DUPLICATE_CLIENTID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DUPLICATE_CLIENTID" + } + }, + "FORBIDDEN_ACCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FORBIDDEN_ACCESS" + } + }, + "MQTT_KEEP_ALIVE_TIMEOUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MQTT_KEEP_ALIVE_TIMEOUT" + } + }, + "SERVER_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVER_ERROR" + } + }, + "SERVER_INITIATED_DISCONNECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVER_INITIATED_DISCONNECT" + } + }, + "THROTTLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "THROTTLED" + } + }, + "WEBSOCKET_TTL_EXPIRATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WEBSOCKET_TTL_EXPIRATION" + } + }, + "CUSTOMAUTH_TTL_EXPIRATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOMAUTH_TTL_EXPIRATION" + } + }, + "UNKNOWN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNKNOWN" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, "com.amazonaws.iot#DisplayName": { "type": "string", "traits": { @@ -18414,7 
+18517,7 @@ "timeToLive": { "target": "com.amazonaws.iot#DateType", "traits": { - "smithy.api#documentation": "

The time to live (TTL) parameter for the GetCommandExecution API.

" + "smithy.api#documentation": "

The time to live (TTL) parameter that indicates the duration for which executions will\n be retained in your account. The default value is six months.

" } } }, @@ -18486,7 +18589,7 @@ "roleArn": { "target": "com.amazonaws.iot#RoleArn", "traits": { - "smithy.api#documentation": "

The IAM role that allows access to retrieve information about the command.

" + "smithy.api#documentation": "

The IAM role that you provided when creating the command with AWS-IoT-FleetWise\n as the namespace.

" } }, "createdAt": { @@ -19605,6 +19708,94 @@ "smithy.api#output": {} } }, + "com.amazonaws.iot#GetThingConnectivityData": { + "type": "operation", + "input": { + "target": "com.amazonaws.iot#GetThingConnectivityDataRequest" + }, + "output": { + "target": "com.amazonaws.iot#GetThingConnectivityDataResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iot#IndexNotReadyException" + }, + { + "target": "com.amazonaws.iot#InternalFailureException" + }, + { + "target": "com.amazonaws.iot#InvalidRequestException" + }, + { + "target": "com.amazonaws.iot#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iot#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.iot#ThrottlingException" + }, + { + "target": "com.amazonaws.iot#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the live connectivity status per device.

", + "smithy.api#http": { + "method": "POST", + "uri": "/things/{thingName}/connectivity-data", + "code": 200 + } + } + }, + "com.amazonaws.iot#GetThingConnectivityDataRequest": { + "type": "structure", + "members": { + "thingName": { + "target": "com.amazonaws.iot#ConnectivityApiThingName", + "traits": { + "smithy.api#documentation": "

The name of your IoT thing.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.iot#GetThingConnectivityDataResponse": { + "type": "structure", + "members": { + "thingName": { + "target": "com.amazonaws.iot#ConnectivityApiThingName", + "traits": { + "smithy.api#documentation": "

The name of your IoT thing.

" + } + }, + "connected": { + "target": "com.amazonaws.iot#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean that indicates the connectivity status.

" + } + }, + "timestamp": { + "target": "com.amazonaws.iot#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the event occurred.

" + } + }, + "disconnectReason": { + "target": "com.amazonaws.iot#DisconnectReasonValue", + "traits": { + "smithy.api#documentation": "

The reason why the client is disconnecting.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.iot#GetTopicRule": { "type": "operation", "input": { @@ -22712,7 +22903,7 @@ } ], "traits": { - "smithy.api#documentation": "

List all command executions.

\n \n

You must provide only the\n startedTimeFilter or the completedTimeFilter information. If you \n provide both time filters, the API will generate an error.\n You can use this information to find command executions that started within\n a specific timeframe.

\n
", + "smithy.api#documentation": "

List all command executions.

\n \n
    \n
  • \n

    You must provide only the startedTimeFilter or \n the completedTimeFilter information. If you provide \n both time filters, the API will generate an error. You can use \n this information to retrieve a list of command executions \n within a specific timeframe.

    \n
  • \n
  • \n

    You must provide only the commandArn or \n the thingArn information depending on whether you want\n to list executions for a specific command or an IoT thing. If you provide \n both fields, the API will generate an error.

    \n
  • \n
\n

For more information about considerations for using this API, see\n List\n command executions in your account (CLI).

\n
", "smithy.api#http": { "method": "POST", "uri": "/command-executions", diff --git a/models/macie2.json b/models/macie2.json index 5ba0b2f3b5..f4ca58a157 100644 --- a/models/macie2.json +++ b/models/macie2.json @@ -417,7 +417,7 @@ "apiServiceName": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "

The URL of the Amazon Web Service that provides the operation, for example: s3.amazonaws.com.

", + "smithy.api#documentation": "

The URL of the Amazon Web Services service that provides the operation, for example: s3.amazonaws.com.

", "smithy.api#jsonName": "apiServiceName" } }, @@ -966,7 +966,7 @@ "unknown": { "target": "com.amazonaws.macie2#__long", "traits": { - "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate permissions settings for. Macie can't determine whether these buckets are publicly accessible.

", + "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate permissions settings for. For example, the buckets' policies or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether the buckets are publicly accessible.

", "smithy.api#jsonName": "unknown" } } @@ -1002,7 +1002,7 @@ "unknown": { "target": "com.amazonaws.macie2#__long", "traits": { - "smithy.api#documentation": "

The total number of buckets that Amazon Macie doesn't have current encryption metadata for. Macie can't provide current data about the default encryption settings for these buckets.

", + "smithy.api#documentation": "

The total number of buckets that Amazon Macie doesn't have current encryption metadata for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the default encryption settings for the buckets.

", "smithy.api#jsonName": "unknown" } } @@ -1038,7 +1038,7 @@ "unknown": { "target": "com.amazonaws.macie2#__long", "traits": { - "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate shared access settings for. Macie can't determine whether these buckets are shared with other Amazon Web Services accounts, Amazon CloudFront OAIs, or CloudFront OACs.

", + "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate shared access settings for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether the buckets are shared with other Amazon Web Services accounts, Amazon CloudFront OAIs, or CloudFront OACs.

", "smithy.api#jsonName": "unknown" } } @@ -1067,7 +1067,7 @@ "unknown": { "target": "com.amazonaws.macie2#__long", "traits": { - "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate server-side encryption requirements for. Macie can't determine whether the bucket policies for these buckets require server-side encryption of new objects.

", + "smithy.api#documentation": "

The total number of buckets that Amazon Macie wasn't able to evaluate server-side encryption requirements for. For example, the buckets' permissions settings or a quota prevented Macie from retrieving the requisite data. Macie can't determine whether bucket policies for the buckets require server-side encryption of new objects.

", "smithy.api#jsonName": "unknown" } } @@ -1236,14 +1236,14 @@ "errorCode": { "target": "com.amazonaws.macie2#BucketMetadataErrorCode", "traits": { - "smithy.api#documentation": "

The error code for an error that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. If this value is ACCESS_DENIED, Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. If this value is null, Macie was able to retrieve and process the information.

", + "smithy.api#documentation": "

The code for an error or issue that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. Possible values are:

  • ACCESS_DENIED - Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request.

  • BUCKET_COUNT_EXCEEDS_QUOTA - Retrieving and processing the information would exceed the quota for the number of buckets that Macie monitors for an account (10,000).

If this value is null, Macie was able to retrieve and process the information.

", "smithy.api#jsonName": "errorCode" } }, "errorMessage": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "

A brief description of the error (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information.

", + "smithy.api#documentation": "

A brief description of the error or issue (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information.

", "smithy.api#jsonName": "errorMessage" } }, @@ -1257,7 +1257,7 @@ "lastAutomatedDiscoveryTime": { "target": "com.amazonaws.macie2#__timestampIso8601", "traits": { - "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account.

", + "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if this analysis hasn't occurred.

", "smithy.api#jsonName": "lastAutomatedDiscoveryTime" } }, @@ -1306,7 +1306,7 @@ "sensitivityScore": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses.

", + "smithy.api#documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it's been disabled for your organization or standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it's been excluded from recent analyses.

", "smithy.api#jsonName": "sensitivityScore" } }, @@ -1368,7 +1368,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. By default, object count and storage size values include data for object parts that are the result of incomplete multipart uploads. For more information, see How Macie monitors Amazon S3 data security in the Amazon Macie User Guide.

If an error occurs when Macie attempts to retrieve and process metadata from Amazon S3 for the bucket or the bucket's objects, the value for the versioning property is false and the value for most other properties is null. Key exceptions are accountId, bucketArn, bucketCreatedAt, bucketName, lastUpdated, and region. To identify the cause of the error, refer to the errorCode and errorMessage values.

" + "smithy.api#documentation": "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. By default, object count and storage size values include data for object parts that are the result of incomplete multipart uploads. For more information, see How Macie monitors Amazon S3 data security in the Amazon Macie User Guide.

If an error or issue prevents Macie from retrieving and processing metadata from Amazon S3 for the bucket or the bucket's objects, the value for the versioning property is false and the value for most other properties is null or UNKNOWN. Key exceptions are accountId, bucketArn, bucketCreatedAt, bucketName, lastUpdated, and region. To identify the cause, refer to the errorCode and errorMessage values.

" } }, "com.amazonaws.macie2#BucketMetadataErrorCode": { @@ -1379,10 +1379,16 @@ "traits": { "smithy.api#enumValue": "ACCESS_DENIED" } + }, + "BUCKET_COUNT_EXCEEDS_QUOTA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BUCKET_COUNT_EXCEEDS_QUOTA" + } } }, "traits": { - "smithy.api#documentation": "

The error code for an error that prevented Amazon Macie from retrieving and processing information about an S3 bucket and the bucket's objects.

" + "smithy.api#documentation": "

The code for an error or issue that prevented Amazon Macie from retrieving and processing information about an S3 bucket and the bucket's objects.

" } }, "com.amazonaws.macie2#BucketPermissionConfiguration": { @@ -1528,7 +1534,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides aggregated statistical data for sensitive data discovery metrics that apply to S3 buckets, grouped by bucket sensitivity score (sensitivityScore). If automated sensitive data discovery is currently disabled for your account, the value for each metric is 0.

" + "smithy.api#documentation": "

Provides aggregated statistical data for sensitive data discovery metrics that apply to S3 buckets, grouped by bucket sensitivity score (sensitivityScore). If automated sensitive data discovery is currently disabled for your account, the value for most of these metrics is 0.

" } }, "com.amazonaws.macie2#Cell": { @@ -3752,7 +3758,7 @@ "suppressed": { "target": "com.amazonaws.macie2#__boolean", "traits": { - "smithy.api#documentation": "

Specifies whether occurrences of this type of sensitive data are excluded (true) or included (false) in the bucket's sensitivity score.

", + "smithy.api#documentation": "

Specifies whether occurrences of this type of sensitive data are excluded (true) or included (false) in the bucket's sensitivity score, if the score is calculated by Amazon Macie.

", "smithy.api#jsonName": "suppressed" } }, @@ -4718,7 +4724,7 @@ } }, "traits": { - "smithy.api#documentation": "

The type of finding. For details about each type, see Types of Amazon Macie findings in the Amazon Macie User Guide. Possible values are:

" + "smithy.api#documentation": "

The type of finding. For details about each type, see Types of findings in the Amazon Macie User Guide. Possible values are:

" } }, "com.amazonaws.macie2#FindingsFilterAction": { @@ -5158,7 +5164,7 @@ "bucketStatisticsBySensitivity": { "target": "com.amazonaws.macie2#BucketStatisticsBySensitivity", "traits": { - "smithy.api#documentation": "

The aggregated sensitive data discovery statistics for the buckets. If automated sensitive data discovery is currently disabled for your account, the value for each statistic is 0.

", + "smithy.api#documentation": "

The aggregated sensitive data discovery statistics for the buckets. If automated sensitive data discovery is currently disabled for your account, the value for most statistics is 0.

", "smithy.api#jsonName": "bucketStatisticsBySensitivity" } }, @@ -6481,7 +6487,7 @@ "reasons": { "target": "com.amazonaws.macie2#__listOfUnavailabilityReasonCode", "traits": { - "smithy.api#documentation": "

Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with an KMS key that's currently disabled.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

This value is null if sensitive data can be retrieved for the finding.

", + "smithy.api#documentation": "

Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with a KMS key that isn't available. For example, the key is disabled, is scheduled for deletion, or was deleted.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

This value is null if sensitive data can be retrieved for the finding.

", "smithy.api#jsonName": "reasons" } } @@ -7894,7 +7900,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves a subset of information about all the custom data identifiers for an account.

", + "smithy.api#documentation": "

Retrieves a subset of information about the custom data identifiers for an account.

", "smithy.api#http": { "method": "POST", "uri": "/custom-data-identifiers/list", @@ -10247,14 +10253,14 @@ "errorCode": { "target": "com.amazonaws.macie2#BucketMetadataErrorCode", "traits": { - "smithy.api#documentation": "

The error code for an error that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. If this value is ACCESS_DENIED, Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. If this value is null, Macie was able to retrieve and process the information.

", + "smithy.api#documentation": "

The code for an error or issue that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. Possible values are:

  • ACCESS_DENIED - Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request.

  • BUCKET_COUNT_EXCEEDS_QUOTA - Retrieving and processing the information would exceed the quota for the number of buckets that Macie monitors for an account (10,000).

If this value is null, Macie was able to retrieve and process the information.

", "smithy.api#jsonName": "errorCode" } }, "errorMessage": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "

A brief description of the error (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information.

", + "smithy.api#documentation": "

A brief description of the error or issue (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information.

", "smithy.api#jsonName": "errorMessage" } }, @@ -10268,7 +10274,7 @@ "lastAutomatedDiscoveryTime": { "target": "com.amazonaws.macie2#__timestampIso8601", "traits": { - "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account.

", + "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if this analysis hasn't occurred.

", "smithy.api#jsonName": "lastAutomatedDiscoveryTime" } }, @@ -10289,7 +10295,7 @@ "sensitivityScore": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses.

", + "smithy.api#documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it's been disabled for your organization or standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it's been excluded from recent analyses.

", "smithy.api#jsonName": "sensitivityScore" } }, @@ -10323,7 +10329,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. By default, object count and storage size values include data for object parts that are the result of incomplete multipart uploads. For more information, see How Macie monitors Amazon S3 data security in the Amazon Macie User Guide.

If an error occurs when Macie attempts to retrieve and process information about the bucket or the bucket's objects, the value for most of these properties is null. Key exceptions are accountId and bucketName. To identify the cause of the error, refer to the errorCode and errorMessage values.

" + "smithy.api#documentation": "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. By default, object count and storage size values include data for object parts that are the result of incomplete multipart uploads. For more information, see How Macie monitors Amazon S3 data security in the Amazon Macie User Guide.

If an error or issue prevents Macie from retrieving and processing information about the bucket or the bucket's objects, the value for many of these properties is null. Key exceptions are accountId and bucketName. To identify the cause, refer to the errorCode and errorMessage values.

" } }, "com.amazonaws.macie2#MatchingResource": { @@ -10332,7 +10338,7 @@ "matchingBucket": { "target": "com.amazonaws.macie2#MatchingBucket", "traits": { - "smithy.api#documentation": "

The details of an S3 bucket that Amazon Macie monitors and analyzes.

", + "smithy.api#documentation": "

The details of an S3 bucket that Amazon Macie monitors and analyzes for your account.

", "smithy.api#jsonName": "matchingBucket" } } @@ -11288,7 +11294,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides information about the S3 bucket that a finding applies to.

" + "smithy.api#documentation": "

Provides information about the S3 bucket that a finding applies to. If a quota prevented Amazon Macie from retrieving and processing all the bucket's information prior to generating the finding, the following values are UNKNOWN or null: allowsUnencryptedObjectUploads, defaultServerSideEncryption, publicAccess, and tags.

" } }, "com.amazonaws.macie2#S3BucketCriteriaForJob": { @@ -11409,7 +11415,7 @@ "target": "com.amazonaws.macie2#__listOfS3BucketName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Depending on the value specified for the update operation (ClassificationScopeUpdateOperation), an array of strings that: lists the names of buckets to add or remove from the list, or specifies a new set of bucket names that overwrites all existing names in the list. Each string must be the full name of an S3 bucket. Values are case sensitive.

", + "smithy.api#documentation": "

Depending on the value specified for the update operation (ClassificationScopeUpdateOperation), an array of strings that: lists the names of buckets to add or remove from the list, or specifies a new set of bucket names that overwrites all existing names in the list. Each string must be the full name of an existing S3 bucket. Values are case sensitive.

", "smithy.api#jsonName": "bucketNames", "smithy.api#required": {} } @@ -11711,7 +11717,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes.

", + "smithy.api#documentation": "

Retrieves (queries) statistical data and other information about Amazon Web Services resources that Amazon Macie monitors and analyzes for an account.

", "smithy.api#http": { "method": "POST", "uri": "/datasources/search-resources", @@ -12050,7 +12056,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies configuration settings that determine which findings are published to Security Hub automatically. For information about how Macie publishes findings to Security Hub, see Amazon Macie integration with Security Hub in the Amazon Macie User Guide.

" + "smithy.api#documentation": "

Specifies configuration settings that determine which findings are published to Security Hub automatically. For information about how Macie publishes findings to Security Hub, see Evaluating findings with Security Hub in the Amazon Macie User Guide.

" } }, "com.amazonaws.macie2#SensitiveData": { @@ -12168,7 +12174,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides aggregated statistical data for sensitive data discovery metrics that apply to S3 buckets. Each field contains aggregated data for all the buckets that have a sensitivity score (sensitivityScore) of a specified value or within a specified range (BucketStatisticsBySensitivity). If automated sensitive data discovery is currently disabled for your account, the value for each field is 0.

" + "smithy.api#documentation": "

Provides aggregated statistical data for sensitive data discovery metrics that apply to S3 buckets. Each field contains aggregated data for all the buckets that have a sensitivity score (sensitivityScore) of a specified value or within a specified range (BucketStatisticsBySensitivity). If automated sensitive data discovery is currently disabled for your account, the value for most fields is 0.

" } }, "com.amazonaws.macie2#SensitivityInspectionTemplateExcludes": { @@ -12712,7 +12718,7 @@ "id": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "

The unique identifier for the custom data identifier or managed data identifier that detected the type of sensitive data to exclude or include in the score.

", + "smithy.api#documentation": "

The unique identifier for the custom data identifier or managed data identifier that detected the type of sensitive data to exclude from the score.

", "smithy.api#jsonName": "id" } }, @@ -12725,7 +12731,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies a custom data identifier or managed data identifier that detected a type of sensitive data to start excluding or including in an S3 bucket's sensitivity score.

" + "smithy.api#documentation": "

Specifies a custom data identifier or managed data identifier that detected a type of sensitive data to exclude from an S3 bucket's sensitivity score.

" } }, "com.amazonaws.macie2#TagCriterionForJob": { @@ -13172,7 +13178,7 @@ "target": "com.amazonaws.macie2#__string", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The type of error that occurred and prevented Amazon Macie from retrieving occurrences of sensitive data reported by the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with an KMS key that's currently disabled.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

", + "smithy.api#documentation": "

The type of error that occurred and prevented Amazon Macie from retrieving occurrences of sensitive data reported by the finding. Possible values are:

  • ACCOUNT_NOT_IN_ORGANIZATION - The affected account isn't currently part of your organization. Or the account is part of your organization but Macie isn't currently enabled for the account. You're not allowed to access the affected S3 object by using Macie.

  • INVALID_CLASSIFICATION_RESULT - There isn't a corresponding sensitive data discovery result for the finding. Or the corresponding sensitive data discovery result isn't available in the current Amazon Web Services Region, is malformed or corrupted, or uses an unsupported storage format. Macie can't verify the location of the sensitive data to retrieve.

  • INVALID_RESULT_SIGNATURE - The corresponding sensitive data discovery result is stored in an S3 object that wasn't signed by Macie. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • MEMBER_ROLE_TOO_PERMISSIVE - The trust or permissions policy for the IAM role in the affected member account doesn't meet Macie requirements for restricting access to the role. Or the role's trust policy doesn't specify the correct external ID for your organization. Macie can't assume the role to retrieve the sensitive data.

  • MISSING_GET_MEMBER_PERMISSION - You're not allowed to retrieve information about the association between your account and the affected account. Macie can't determine whether you’re allowed to access the affected S3 object as the delegated Macie administrator for the affected account.

  • OBJECT_EXCEEDS_SIZE_QUOTA - The storage size of the affected S3 object exceeds the size quota for retrieving occurrences of sensitive data from this type of file.

  • OBJECT_UNAVAILABLE - The affected S3 object isn't available. The object was renamed, moved, deleted, or changed after Macie created the finding. Or the object is encrypted with a KMS key that isn't available. For example, the key is disabled, is scheduled for deletion, or was deleted.

  • RESULT_NOT_SIGNED - The corresponding sensitive data discovery result is stored in an S3 object that hasn't been signed. Macie can't verify the integrity and authenticity of the sensitive data discovery result. Therefore, Macie can't verify the location of the sensitive data to retrieve.

  • ROLE_TOO_PERMISSIVE - Your account is configured to retrieve occurrences of sensitive data by using an IAM role whose trust or permissions policy doesn't meet Macie requirements for restricting access to the role. Macie can’t assume the role to retrieve the sensitive data.

  • UNSUPPORTED_FINDING_TYPE - The specified finding isn't a sensitive data finding.

  • UNSUPPORTED_OBJECT_TYPE - The affected S3 object uses a file or storage format that Macie doesn't support for retrieving occurrences of sensitive data.

", "smithy.api#jsonName": "message", "smithy.api#required": {} } @@ -13972,7 +13978,7 @@ "suppressDataIdentifiers": { "target": "com.amazonaws.macie2#__listOfSuppressDataIdentifier", "traits": { - "smithy.api#documentation": "

An array of objects, one for each custom data identifier or managed data identifier that detected the type of sensitive data to start excluding or including in the bucket's score. To start including all sensitive data types in the score, don't specify any values for this array.

", + "smithy.api#documentation": "

An array of objects, one for each custom data identifier or managed data identifier that detected a type of sensitive data to exclude from the bucket's score. To include all sensitive data types in the score, don't specify any values for this array.

", "smithy.api#jsonName": "suppressDataIdentifiers" } } @@ -14040,7 +14046,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the access method and settings to use when retrieving occurrences of sensitive data reported by findings. If your request specifies an Identity and Access Management (IAM) role to assume, Amazon Macie verifies that the role exists and the attached policies are configured correctly. If there's an issue, Macie returns an error. For information about addressing the issue, see Configuration options and requirements for retrieving sensitive data samples in the Amazon Macie User Guide.

" + "smithy.api#documentation": "

Specifies the access method and settings to use when retrieving occurrences of sensitive data reported by findings. If your request specifies an Identity and Access Management (IAM) role to assume, Amazon Macie verifies that the role exists and the attached policies are configured correctly. If there's an issue, Macie returns an error. For information about addressing the issue, see Configuration options for retrieving sensitive data samples in the Amazon Macie User Guide.

" } }, "com.amazonaws.macie2#UpdateRevealConfiguration": { @@ -14516,7 +14522,7 @@ "awsService": { "target": "com.amazonaws.macie2#AwsService", "traits": { - "smithy.api#documentation": "

If the action was performed by an Amazon Web Services account that belongs to an Amazon Web Service, the name of the service.

", + "smithy.api#documentation": "

If the action was performed by an Amazon Web Services account that belongs to an Amazon Web Services service, the name of the service.

", "smithy.api#jsonName": "awsService" } }, diff --git a/models/mediaconvert.json b/models/mediaconvert.json index 9ca11269d6..fd2ef9b715 100644 --- a/models/mediaconvert.json +++ b/models/mediaconvert.json @@ -1415,7 +1415,7 @@ } }, "AudioSourceName": { - "target": "com.amazonaws.mediaconvert#__string", + "target": "com.amazonaws.mediaconvert#__stringMax2048", "traits": { "smithy.api#documentation": "Specifies which audio data to use from each input. In the simplest case, specify an \"Audio Selector\":#inputs-audio_selector by name based on its order within each input. For example if you specify \"Audio Selector 3\", then the third audio selector will be used from each input. If an input does not have an \"Audio Selector 3\", then the audio selector marked as \"default\" in that input will be used. If there is no audio selector marked as \"default\", silence will be inserted for the duration of that input. Alternatively, an \"Audio Selector Group\":#inputs-audio_selector_group name may be specified, with similar default/silence behavior. If no audio_source_name is specified, then \"Audio Selector 1\" will be chosen automatically.", "smithy.api#jsonName": "audioSourceName" @@ -2820,6 +2820,13 @@ "smithy.api#jsonName": "outlineSize" } }, + "RemoveRubyReserveAttributes": { + "target": "com.amazonaws.mediaconvert#RemoveRubyReserveAttributes", + "traits": { + "smithy.api#documentation": "Optionally remove any tts:rubyReserve attributes present in your input, that do not have a tts:ruby attribute in the same element, from your output. Use if your vertical Japanese output captions have alignment issues. To remove ruby reserve attributes when present: Choose Enabled. 
To not remove any ruby reserve attributes: Keep the default value, Disabled.", + "smithy.api#jsonName": "removeRubyReserveAttributes" + } + }, "ShadowColor": { "target": "com.amazonaws.mediaconvert#BurninSubtitleShadowColor", "traits": { @@ -7052,7 +7059,7 @@ } }, "traits": { - "smithy.api#documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled." + "smithy.api#documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion or Timecode track is enabled." } }, "com.amazonaws.mediaconvert#DvbNitSettings": { @@ -13046,7 +13053,7 @@ } }, "FileInput": { - "target": "com.amazonaws.mediaconvert#__stringPatternS3Https", + "target": "com.amazonaws.mediaconvert#__stringMax2048PatternS3Https", "traits": { "smithy.api#documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL.", "smithy.api#jsonName": "fileInput" @@ -14054,7 +14061,7 @@ "FollowSource": { "target": "com.amazonaws.mediaconvert#__integerMin1Max150", "traits": { - "smithy.api#documentation": "Specify the input that MediaConvert references for your default output settings. 
MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs.", + "smithy.api#documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs.", "smithy.api#jsonName": "followSource" } }, @@ -14327,7 +14334,7 @@ "FollowSource": { "target": "com.amazonaws.mediaconvert#__integerMin1Max150", "traits": { - "smithy.api#documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs.", + "smithy.api#documentation": "Specify the input that MediaConvert references for your default output settings. 
MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs.", "smithy.api#jsonName": "followSource" } }, @@ -20538,7 +20545,7 @@ } }, "Extension": { - "target": "com.amazonaws.mediaconvert#__string", + "target": "com.amazonaws.mediaconvert#__stringMax256", "traits": { "smithy.api#documentation": "Use Extension to specify the file extension for outputs in File output groups. If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, webm * No Container, the service will use codec extensions (e.g. AAC, H265, H265, AC3)", "smithy.api#jsonName": "extension" @@ -20639,7 +20646,7 @@ } }, "Name": { - "target": "com.amazonaws.mediaconvert#__string", + "target": "com.amazonaws.mediaconvert#__stringMax2048", "traits": { "smithy.api#documentation": "Name of the output group", "smithy.api#jsonName": "name" @@ -21779,6 +21786,26 @@ "smithy.api#documentation": "Use Manual audio remixing to adjust audio levels for each audio channel in each output of your job. With audio remixing, you can output more or fewer audio channels than your input audio source provides." 
} }, + "com.amazonaws.mediaconvert#RemoveRubyReserveAttributes": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + } + }, + "traits": { + "smithy.api#documentation": "Optionally remove any tts:rubyReserve attributes present in your input, that do not have a tts:ruby attribute in the same element, from your output. Use if your vertical Japanese output captions have alignment issues. To remove ruby reserve attributes when present: Choose Enabled. To not remove any ruby reserve attributes: Keep the default value, Disabled." + } + }, "com.amazonaws.mediaconvert#RenewalType": { "type": "enum", "members": { @@ -23027,6 +23054,26 @@ "smithy.api#documentation": "Use Source to set how timecodes are handled within this job. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded - Use the timecode that is in the input video. If no embedded timecode is in the source, the service will use Start at 0 instead. * Start at 0 - Set the timecode of the initial frame to 00:00:00:00. * Specified Start - Set the timecode of the initial frame to a value other than zero. You use Start timecode to provide this value." } }, + "com.amazonaws.mediaconvert#TimecodeTrack": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + } + }, + "traits": { + "smithy.api#documentation": "To include a timecode track in your MP4 output: Choose Enabled. 
MediaConvert writes the timecode track in the Null Media Header box (NMHD), without any timecode text formatting information. You can also specify dropframe or non-dropframe timecode under the Drop Frame Timecode setting. To not include a timecode track: Keep the default value, Disabled." + } + }, "com.amazonaws.mediaconvert#TimedMetadata": { "type": "enum", "members": { @@ -24262,7 +24309,7 @@ "DropFrameTimecode": { "target": "com.amazonaws.mediaconvert#DropFrameTimecode", "traits": { - "smithy.api#documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled.", + "smithy.api#documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion or Timecode track is enabled.", "smithy.api#jsonName": "dropFrameTimecode" } }, @@ -24315,6 +24362,13 @@ "smithy.api#jsonName": "timecodeInsertion" } }, + "TimecodeTrack": { + "target": "com.amazonaws.mediaconvert#TimecodeTrack", + "traits": { + "smithy.api#documentation": "To include a timecode track in your MP4 output: Choose Enabled. MediaConvert writes the timecode track in the Null Media Header box (NMHD), without any timecode text formatting information. You can also specify dropframe or non-dropframe timecode under the Drop Frame Timecode setting. 
To not include a timecode track: Keep the default value, Disabled.", + "smithy.api#jsonName": "timecodeTrack" + } + }, "VideoPreprocessors": { "target": "com.amazonaws.mediaconvert#VideoPreprocessor", "traits": { @@ -27478,6 +27532,34 @@ } } }, + "com.amazonaws.mediaconvert#__stringMax2048": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + } + } + }, + "com.amazonaws.mediaconvert#__stringMax2048PatternS3Https": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^s3://([^\\/]+\\/+)+((([^\\/]*)))|^https?://[^\\/].*[^&]$" + } + }, + "com.amazonaws.mediaconvert#__stringMax256": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, "com.amazonaws.mediaconvert#__stringMin0": { "type": "string", "traits": { diff --git a/models/medialive.json b/models/medialive.json index 4796ffa365..0c40fa229f 100644 --- a/models/medialive.json +++ b/models/medialive.json @@ -3555,6 +3555,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -3596,6 +3603,43 @@ "smithy.api#documentation": "Placeholder documentation for ChannelEgressEndpoint" } }, + "com.amazonaws.medialive#ChannelEngineVersionRequest": { + "type": "structure", + "members": { + "Version": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "The build identifier of the engine version to use for this channel. 
Specify 'DEFAULT' to reset to the default version.", + "smithy.api#jsonName": "version" + } + } + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for ChannelEngineVersionRequest" + } + }, + "com.amazonaws.medialive#ChannelEngineVersionResponse": { + "type": "structure", + "members": { + "ExpirationDate": { + "target": "com.amazonaws.medialive#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The UTC time when the version expires.", + "smithy.api#jsonName": "expirationDate" + } + }, + "Version": { + "target": "com.amazonaws.medialive#__string", + "traits": { + "smithy.api#documentation": "The build identifier for this version of the channel version.", + "smithy.api#jsonName": "version" + } + } + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for ChannelEngineVersionResponse" + } + }, "com.amazonaws.medialive#ChannelPipelineIdToRestart": { "type": "enum", "members": { @@ -3860,6 +3904,20 @@ "smithy.api#documentation": "AnywhereSettings settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "The engine version that you requested for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } + }, + "UsedChannelEngineVersions": { + "target": "com.amazonaws.medialive#__listOfChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "The engine version that the running pipelines are using.", + "smithy.api#jsonName": "usedChannelEngineVersions" + } } }, "traits": { @@ -5002,6 +5060,19 @@ "smithy.api#documentation": "The Elemental Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionRequest", + "traits": { + "smithy.api#documentation": "The desired engine version for this channel.", + 
"smithy.api#jsonName": "channelEngineVersion" + } + }, + "DryRun": { + "target": "com.amazonaws.medialive#__boolean", + "traits": { + "smithy.api#jsonName": "dryRun" + } } }, "traits": { @@ -7553,6 +7624,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -9615,6 +9693,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -22786,6 +22871,73 @@ "smithy.api#output": {} } }, + "com.amazonaws.medialive#ListVersions": { + "type": "operation", + "input": { + "target": "com.amazonaws.medialive#ListVersionsRequest" + }, + "output": { + "target": "com.amazonaws.medialive#ListVersionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.medialive#BadGatewayException" + }, + { + "target": "com.amazonaws.medialive#BadRequestException" + }, + { + "target": "com.amazonaws.medialive#ConflictException" + }, + { + "target": "com.amazonaws.medialive#ForbiddenException" + }, + { + "target": "com.amazonaws.medialive#GatewayTimeoutException" + }, + { + "target": "com.amazonaws.medialive#InternalServerErrorException" + }, + { + "target": "com.amazonaws.medialive#NotFoundException" + }, + { + "target": "com.amazonaws.medialive#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieves an array of all the encoder engine versions that are available in this AWS account.", + "smithy.api#http": { + "method": "GET", 
+ "uri": "/prod/versions", + "code": 200 + } + } + }, + "com.amazonaws.medialive#ListVersionsRequest": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "Placeholder documentation for ListVersionsRequest", + "smithy.api#input": {} + } + }, + "com.amazonaws.medialive#ListVersionsResponse": { + "type": "structure", + "members": { + "Versions": { + "target": "com.amazonaws.medialive#__listOfChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "List of engine versions that are available for this AWS account.", + "smithy.api#jsonName": "versions" + } + } + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for ListVersionsResponse", + "smithy.api#output": {} + } + }, "com.amazonaws.medialive#LogLevel": { "type": "enum", "members": { @@ -24237,6 +24389,9 @@ { "target": "com.amazonaws.medialive#ListTagsForResource" }, + { + "target": "com.amazonaws.medialive#ListVersions" + }, { "target": "com.amazonaws.medialive#PurchaseOffering" }, @@ -27971,6 +28126,13 @@ "smithy.api#documentation": "Pipeline ID", "smithy.api#jsonName": "pipelineId" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Current engine version of the encoder for this pipeline.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -29089,6 +29251,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -31135,6 +31304,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": 
"com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -32451,6 +32627,13 @@ "smithy.api#documentation": "Anywhere settings for this channel.", "smithy.api#jsonName": "anywhereSettings" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse", + "traits": { + "smithy.api#documentation": "Requested engine version for this channel.", + "smithy.api#jsonName": "channelEngineVersion" + } } }, "traits": { @@ -33942,6 +34125,19 @@ "smithy.api#documentation": "An optional Amazon Resource Name (ARN) of the role to assume when running the Channel. If you do not specify this on an update call but the role was previously set that role will be removed.", "smithy.api#jsonName": "roleArn" } + }, + "ChannelEngineVersion": { + "target": "com.amazonaws.medialive#ChannelEngineVersionRequest", + "traits": { + "smithy.api#documentation": "Channel engine version for this channel", + "smithy.api#jsonName": "channelEngineVersion" + } + }, + "DryRun": { + "target": "com.amazonaws.medialive#__boolean", + "traits": { + "smithy.api#jsonName": "dryRun" + } } }, "traits": { @@ -37169,6 +37365,15 @@ "smithy.api#documentation": "Placeholder documentation for __listOfChannelEgressEndpoint" } }, + "com.amazonaws.medialive#__listOfChannelEngineVersionResponse": { + "type": "list", + "member": { + "target": "com.amazonaws.medialive#ChannelEngineVersionResponse" + }, + "traits": { + "smithy.api#documentation": "Placeholder documentation for __listOfChannelEngineVersionResponse" + } + }, "com.amazonaws.medialive#__listOfChannelPipelineIdToRestart": { "type": "list", "member": { diff --git a/models/mwaa.json b/models/mwaa.json index 161364d670..99d50643a8 100644 --- a/models/mwaa.json +++ b/models/mwaa.json @@ -1136,7 +1136,7 @@ "AirflowVersion": { "target": "com.amazonaws.mwaa#AirflowVersion", 
"traits": { - "smithy.api#documentation": "

The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version.\n For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA).

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, and 2.10.1.

" + "smithy.api#documentation": "

The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version.\n For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA).

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, 2.10.1, and 2.10.3.

" } }, "LoggingConfiguration": { @@ -1443,7 +1443,7 @@ "AirflowVersion": { "target": "com.amazonaws.mwaa#AirflowVersion", "traits": { - "smithy.api#documentation": "

The Apache Airflow version on your environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, and 2.10.1.

" + "smithy.api#documentation": "

The Apache Airflow version on your environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, 2.10.1, and 2.10.3.

" } }, "SourceBucketArn": { @@ -2989,7 +2989,7 @@ "AirflowVersion": { "target": "com.amazonaws.mwaa#AirflowVersion", "traits": { - "smithy.api#documentation": "

The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

\n

Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating\n your resources, see Upgrading an Amazon MWAA environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, and 2.10.1.

" + "smithy.api#documentation": "

The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

\n

Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating\n your resources, see Upgrading an Amazon MWAA environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2,\n 2.4.3, 2.5.1, 2.6.3, 2.7.2,\n 2.8.1, 2.9.2, 2.10.1, and 2.10.3.

" } }, "SourceBucketArn": { diff --git a/models/network-firewall.json b/models/network-firewall.json index 7d936974e9..05d71ac87a 100644 --- a/models/network-firewall.json +++ b/models/network-firewall.json @@ -3374,7 +3374,7 @@ "name": "network-firewall" }, "aws.protocols#awsJson1_0": {}, - "smithy.api#documentation": "

This is the API Reference for Network Firewall. This guide is for developers who need\n detailed information about the Network Firewall API actions, data types, and errors.

\n
    \n
  • \n

    The REST API requires you to handle connection details, such as calculating\n signatures, handling request retries, and error handling. For general information\n about using the Amazon Web Services REST APIs, see Amazon Web Services APIs.

    \n

    To access Network Firewall using the REST API endpoint:\n https://network-firewall..amazonaws.com \n

    \n
  • \n
  • \n

    Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to\n the programming language or platform that you're using. For more information, see\n Amazon Web Services SDKs.

    \n
  • \n
  • \n

    For descriptions of Network Firewall features, including and step-by-step\n instructions on how to use them through the Network Firewall console, see the Network Firewall Developer\n Guide.

    \n
  • \n
\n

Network Firewall is a stateful, managed, network firewall and intrusion detection and\n prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the\n perimeter of your VPC. This includes filtering traffic going to and coming from an internet\n gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible\n with Suricata, a free, open source network analysis and threat detection engine.

\n

You can use Network Firewall to monitor and protect your VPC traffic in a number of ways.\n The following are just a few examples:

\n
    \n
  • \n

    Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and\n block all other forms of traffic.

    \n
  • \n
  • \n

    Use custom lists of known bad domains to limit the types of domain names that your\n applications can access.

    \n
  • \n
  • \n

    Perform deep packet inspection on traffic entering or leaving your VPC.

    \n
  • \n
  • \n

    Use stateful protocol detection to filter protocols like HTTPS, regardless of the\n port used.

    \n
  • \n
\n

To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in\n Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

\n

To start using Network Firewall, do the following:

\n
    \n
  1. \n

    (Optional) If you don't already have a VPC that you want to protect, create it in\n Amazon VPC.

    \n
  2. \n
  3. \n

    In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a\n subnet for the sole use of Network Firewall.

    \n
  4. \n
  5. \n

    In Network Firewall, create stateless and stateful rule groups,\n to define the components of the network traffic filtering behavior that you want your firewall to have.

    \n
  6. \n
  7. \n

    In Network Firewall, create a firewall policy that uses your rule groups and\n specifies additional default traffic filtering behavior.

    \n
  8. \n
  9. \n

    In Network Firewall, create a firewall and specify your new firewall policy and\n VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you\n specify, with the behavior that's defined in the firewall policy.

    \n
  10. \n
  11. \n

    In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall\n endpoints.

    \n
  12. \n
", + "smithy.api#documentation": "

This is the API Reference for Network Firewall. This guide is for developers who need\n detailed information about the Network Firewall API actions, data types, and errors.

\n

The REST API requires you to handle connection details, such as calculating\n signatures, handling request retries, and error handling. For general information\n about using the Amazon Web Services REST APIs, see Amazon Web Services APIs.

\n

To view the complete list of Amazon Web Services Regions where Network Firewall is available, see\n Service\n endpoints and quotas in the Amazon Web Services General\n Reference.\n

\n

To access Network Firewall using the IPv4 REST API endpoint:\n https://network-firewall..amazonaws.com \n

\n

To access Network Firewall using the Dualstack (IPv4 and IPv6) REST API endpoint:\n https://network-firewall..aws.api \n

\n

Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to\n the programming language or platform that you're using. For more information, see\n Amazon Web Services SDKs.

\n

For descriptions of Network Firewall features, including and step-by-step\n instructions on how to use them through the Network Firewall console, see the Network Firewall Developer\n Guide.

\n

Network Firewall is a stateful, managed, network firewall and intrusion detection and\n prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the\n perimeter of your VPC. This includes filtering traffic going to and coming from an internet\n gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible\n with Suricata, a free, open source network analysis and threat detection engine.

\n

You can use Network Firewall to monitor and protect your VPC traffic in a number of ways.\n The following are just a few examples:

\n
    \n
  • \n

    Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and\n block all other forms of traffic.

    \n
  • \n
  • \n

    Use custom lists of known bad domains to limit the types of domain names that your\n applications can access.

    \n
  • \n
  • \n

    Perform deep packet inspection on traffic entering or leaving your VPC.

    \n
  • \n
  • \n

    Use stateful protocol detection to filter protocols like HTTPS, regardless of the\n port used.

    \n
  • \n
\n

To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in\n Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

\n

To start using Network Firewall, do the following:

\n
    \n
  1. \n

    (Optional) If you don't already have a VPC that you want to protect, create it in\n Amazon VPC.

    \n
  2. \n
  3. \n

    In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a\n subnet for the sole use of Network Firewall.

    \n
  4. \n
  5. \n

    In Network Firewall, create stateless and stateful rule groups,\n to define the components of the network traffic filtering behavior that you want your firewall to have.

    \n
  6. \n
  7. \n

    In Network Firewall, create a firewall policy that uses your rule groups and\n specifies additional default traffic filtering behavior.

    \n
  8. \n
  9. \n

    In Network Firewall, create a firewall and specify your new firewall policy and\n VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you\n specify, with the behavior that's defined in the firewall policy.

    \n
  10. \n
  11. \n

    In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall\n endpoints.

    \n
  12. \n
", "smithy.api#title": "AWS Network Firewall", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/outposts.json b/models/outposts.json index 93969fca11..c4d64247e0 100644 --- a/models/outposts.json +++ b/models/outposts.json @@ -5193,6 +5193,12 @@ "traits": { "smithy.api#enumValue": "AH532P6W" } + }, + "CS8365C": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CS8365C" + } } } }, @@ -5593,7 +5599,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts the specified capacity task. You can have one active capacity task per order or Outpost.

", + "smithy.api#documentation": "

Starts the specified capacity task. You can have one active capacity task for each order and each Outpost.

", "smithy.api#http": { "method": "POST", "uri": "/outposts/{OutpostIdentifier}/capacity", @@ -6409,7 +6415,7 @@ "PowerConnector": { "target": "com.amazonaws.outposts#PowerConnector", "traits": { - "smithy.api#documentation": "

The power connector that Amazon Web Services should plan to provide for connections to the hardware.\n Note the correlation between PowerPhase and PowerConnector.

\n
    \n
  • \n

    Single-phase AC feed

    \n
      \n
    • \n

      \n L6-30P – (common in US); 30A; single phase

      \n
    • \n
    • \n

      \n IEC309 (blue) – P+N+E, 6hr; 32 A; single\n phase

      \n
    • \n
    \n
  • \n
  • \n

    Three-phase AC feed

    \n
      \n
    • \n

      \n AH530P7W (red) – 3P+N+E, 7hr; 30A; three\n phase

      \n
    • \n
    • \n

      \n AH532P6W (red) – 3P+N+E, 6hr; 32A; three\n phase

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

The power connector that Amazon Web Services should plan to provide for connections to the hardware.\n Note the correlation between PowerPhase and PowerConnector.

\n
    \n
  • \n

    Single-phase AC feed

    \n
      \n
    • \n

      \n L6-30P – (common in US); 30A; single phase

      \n
    • \n
    • \n

      \n IEC309 (blue) – P+N+E, 6hr; 32 A; single\n phase

      \n
    • \n
    \n
  • \n
  • \n

    Three-phase AC feed

    \n
      \n
    • \n

      \n AH530P7W (red) – 3P+N+E, 7hr; 30A; three\n phase

      \n
    • \n
    • \n

      \n AH532P6W (red) – 3P+N+E, 6hr; 32A; three\n phase

      \n
    • \n
    • \n

      \n CS8365C – (common in US); 3P+E, 50A; three phase

      \n
    • \n
    \n
  • \n
" } }, "PowerFeedDrop": { diff --git a/models/qconnect.json b/models/qconnect.json index 5341c7344e..193ad79626 100644 --- a/models/qconnect.json +++ b/models/qconnect.json @@ -1299,6 +1299,12 @@ "traits": { "smithy.api#documentation": "

The association configurations for overriding behavior on this AI Agent.

" } + }, + "locale": { + "target": "com.amazonaws.qconnect#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The locale that specifies the language and region settings that determine the response\n language for QueryAssistant.

\n \n

Changing this locale to anything other than en_US will turn off\n recommendations triggered by contact transcripts for agent assistance, as this feature is\n not supported in multiple languages.

\n
" + } } }, "traits": { @@ -7805,7 +7811,7 @@ "type": { "target": "com.amazonaws.qconnect#GuardrailPiiEntityType", "traits": { - "smithy.api#documentation": "

Configure AI Guardrail type when the PII entity is detected.

\n

The following PIIs are used to block or mask sensitive information:

\n
    \n
  • \n

    \n General\n

    \n
      \n
    • \n

      \n ADDRESS\n

      \n

      A physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12,\n Building 123\". An address can include information such as the street, building,\n location, city, state, country, county, zip code, precinct, and neighborhood.

      \n
    • \n
    • \n

      \n AGE\n

      \n

      An individual's age, including the quantity and unit of time. For example, in the\n phrase \"I am 40 years old,\" Guardrails recognizes \"40 years\" as an age.

      \n
    • \n
    • \n

      \n NAME\n

      \n

      An individual's name. This entity type does not include titles, such as Dr., Mr.,\n Mrs., or Miss. AI Guardrail doesn't apply this entity type to names that are part of\n organizations or addresses. For example, AI Guardrail recognizes the \"John Doe\n Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.\n

      \n
    • \n
    • \n

      \n EMAIL\n

      \n

      An email address, such as marymajor@email.com.

      \n
    • \n
    • \n

      \n PHONE\n

      \n

      A phone number. This entity type also includes fax and pager numbers.

      \n
    • \n
    • \n

      \n USERNAME\n

      \n

      A user name that identifies an account, such as a login name, screen name, nick\n name, or handle.

      \n
    • \n
    • \n

      \n PASSWORD\n

      \n

      An alphanumeric string that is used as a password, such as \"*\n very20special#pass*\".

      \n
    • \n
    • \n

      \n DRIVER_ID\n

      \n

      The number assigned to a driver's license, which is an official document\n permitting an individual to operate one or more motorized vehicles on a public road. A\n driver's license number consists of alphanumeric characters.

      \n
    • \n
    • \n

      \n LICENSE_PLATE\n

      \n

      A license plate for a vehicle is issued by the state or country where the vehicle\n is registered. The format for passenger vehicles is typically five to eight digits,\n consisting of upper-case letters and numbers. The format varies depending on the\n location of the issuing state or country.

      \n
    • \n
    • \n

      \n VEHICLE_IDENTIFICATION_NUMBER\n

      \n

      A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content\n and format are defined in the ISO 3779 specification. Each\n country has specific codes and formats for VINs.

      \n
    • \n
    \n
  • \n
  • \n

    \n Finance\n

    \n
      \n
    • \n

      \n CREDIT_DEBIT_CARD_CVV\n

      \n

      A three-digit card verification code (CVV) that is present on VISA, MasterCard,\n and Discover credit and debit cards. For American Express credit or debit cards, the\n CVV is a four-digit numeric code.

      \n
    • \n
    • \n

      \n CREDIT_DEBIT_CARD_EXPIRY\n

      \n

      The expiration date for a credit or debit card. This number is usually four digits\n long and is often formatted as month/year or\n MM/YY. AI Guardrail recognizes expiration dates such as\n 01/21, 01/2021, and Jan\n 2021.

      \n
    • \n
    • \n

      \n CREDIT_DEBIT_CARD_NUMBER\n

      \n

      The number for a credit or debit card. These numbers can vary from 13 to 16 digits\n in length. However, Amazon Comprehend also recognizes credit or debit card numbers\n when only the last four digits are present.

      \n
    • \n
    • \n

      \n PIN\n

      \n

      A four-digit personal identification number (PIN) with which you can access your\n bank account.

      \n
    • \n
    • \n

      \n INTERNATIONAL_BANK_ACCOUNT_NUMBER\n

      \n

      An International Bank Account Number has specific formats in each country. For\n more information, see \n www.iban.com/structure.

      \n
    • \n
    • \n

      \n SWIFT_CODE\n

      \n

      A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a\n particular bank or branch. Banks use these codes for money transfers such as\n international wire transfers.

      \n

      SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to\n specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer\n to the head or primary office.

      \n
    • \n
    \n
  • \n
  • \n

    \n IT\n

    \n
      \n
    • \n

      \n IP_ADDRESS\n

      \n

      An IPv4 address, such as 198.51.100.0.

      \n
    • \n
    • \n

      \n MAC_ADDRESS\n

      \n

      A media access control (MAC) address is a unique identifier\n assigned to a network interface controller (NIC).

      \n
    • \n
    • \n

      \n URL\n

      \n

      A web address, such as www.example.com.

      \n
    • \n
    • \n

      \n AWS_ACCESS_KEY\n

      \n

      A unique identifier that's associated with a secret access key; you use the access\n key ID and secret access key to sign programmatic Amazon Web Services requests\n cryptographically.

      \n
    • \n
    • \n

      \n AWS_SECRET_KEY\n

      \n

      A unique identifier that's associated with an access key. You use the access key\n ID and secret access key to sign programmatic Amazon Web Services requests\n cryptographically.

      \n
    • \n
    \n
  • \n
  • \n

    \n USA specific\n

    \n
      \n
    • \n

      \n US_BANK_ACCOUNT_NUMBER\n

      \n

      A US bank account number, which is typically 10 to 12 digits long.

      \n
    • \n
    • \n

      \n US_BANK_ROUTING_NUMBER\n

      \n

      A US bank account routing number. These are typically nine digits long,

      \n
    • \n
    • \n

      \n US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER\n

      \n

      A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that\n starts with a \"9\" and contain a \"7\" or \"8\" as the fourth digit. An ITIN can be\n formatted with a space or a dash after the third and forth digits.

      \n
    • \n
    • \n

      \n US_PASSPORT_NUMBER\n

      \n

      A US passport number. Passport numbers range from six to nine alphanumeric\n characters.

      \n
    • \n
    • \n

      \n US_SOCIAL_SECURITY_NUMBER\n

      \n

      A US Social Security Number (SSN) is a nine-digit number that is issued to US\n citizens, permanent residents, and temporary working residents.

      \n
    • \n
    \n
  • \n
  • \n

    \n Canada specific\n

    \n
      \n
    • \n

      \n CA_HEALTH_NUMBER\n

      \n

      A Canadian Health Service Number is a 10-digit unique identifier, required for\n individuals to access healthcare benefits.

      \n
    • \n
    • \n

      \n CA_SOCIAL_INSURANCE_NUMBER\n

      \n

      A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier,\n required for individuals to access government programs and benefits.

      \n

      The SIN is formatted as three groups of three digits, such as \n 123-456-789. A SIN can be validated through a simple check-digit process\n called the Luhn\n algorithm .

      \n
    • \n
    \n
  • \n
  • \n

    \n UK Specific\n

    \n
      \n
    • \n

      \n UK_NATIONAL_HEALTH_SERVICE_NUMBER\n

      \n

      A UK National Health Service Number is a 10-17 digit number, such as 485\n 555 3456. The current system formats the 10-digit number with spaces\n after the third and sixth digits. The final digit is an error-detecting\n checksum.

      \n
    • \n
    • \n

      \n UK_NATIONAL_INSURANCE_NUMBER\n

      \n

      A UK National Insurance Number (NINO) provides individuals with access to National\n Insurance (social security) benefits. It is also used for some purposes in the UK tax\n system.

      \n

      The number is nine digits long and starts with two letters, followed by six\n numbers and one letter. A NINO can be formatted with a space or a dash after the two\n letters and after the second, forth, and sixth digits.

      \n
    • \n
    • \n

      \n UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER\n

      \n

      A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a\n taxpayer or a business.

      \n
    • \n
    \n
  • \n
  • \n

    \n Custom\n

    \n
      \n
    • \n

      \n Regex filter - You can use a regular expressions to\n define patterns for an AI Guardrail to recognize and act upon such as serial number,\n booking ID etc..

      \n
    • \n
    \n
  • \n
", + "smithy.api#documentation": "

Configure AI Guardrail type when the PII entity is detected.

\n

The following PIIs are used to block or mask sensitive information:

\n
    \n
  • \n

    \n General\n

    \n
      \n
    • \n

      \n ADDRESS\n

      \n

      A physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12,\n Building 123\". An address can include information such as the street, building,\n location, city, state, country, county, zip code, precinct, and neighborhood.

      \n
    • \n
    • \n

      \n AGE\n

      \n

      An individual's age, including the quantity and unit of time. For example, in the\n phrase \"I am 40 years old,\" AI Guardrail recognizes \"40 years\" as an age.

      \n
    • \n
    • \n

      \n NAME\n

      \n

      An individual's name. This entity type does not include titles, such as Dr., Mr.,\n Mrs., or Miss. AI Guardrail doesn't apply this entity type to names that are part of\n organizations or addresses. For example, AI Guardrail recognizes the \"John Doe\n Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.\n

      \n
    • \n
    • \n

      \n EMAIL\n

      \n

      An email address, such as marymajor@email.com.

      \n
    • \n
    • \n

      \n PHONE\n

      \n

      A phone number. This entity type also includes fax and pager numbers.

      \n
    • \n
    • \n

      \n USERNAME\n

      \n

      A user name that identifies an account, such as a login name, screen name, nick\n name, or handle.

      \n
    • \n
    • \n

      \n PASSWORD\n

      \n

      An alphanumeric string that is used as a password, such as \"*\n very20special#pass*\".

      \n
    • \n
    • \n

      \n DRIVER_ID\n

      \n

      The number assigned to a driver's license, which is an official document\n permitting an individual to operate one or more motorized vehicles on a public road. A\n driver's license number consists of alphanumeric characters.

      \n
    • \n
    • \n

      \n LICENSE_PLATE\n

      \n

      A license plate for a vehicle is issued by the state or country where the vehicle\n is registered. The format for passenger vehicles is typically five to eight digits,\n consisting of upper-case letters and numbers. The format varies depending on the\n location of the issuing state or country.

      \n
    • \n
    • \n

      \n VEHICLE_IDENTIFICATION_NUMBER\n

      \n

      A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content\n and format are defined in the ISO 3779 specification. Each\n country has specific codes and formats for VINs.

      \n
    • \n
    \n
  • \n
  • \n

    \n Finance\n

    \n
      \n
    • \n

      \n CREDIT_DEBIT_CARD_CVV\n

      \n

      A three-digit card verification code (CVV) that is present on VISA, MasterCard,\n and Discover credit and debit cards. For American Express credit or debit cards, the\n CVV is a four-digit numeric code.

      \n
    • \n
    • \n

      \n CREDIT_DEBIT_CARD_EXPIRY\n

      \n

      The expiration date for a credit or debit card. This number is usually four digits\n long and is often formatted as month/year or\n MM/YY. AI Guardrail recognizes expiration dates such as\n 01/21, 01/2021, and Jan\n 2021.

      \n
    • \n
    • \n

      \n CREDIT_DEBIT_CARD_NUMBER\n

      \n

      The number for a credit or debit card. These numbers can vary from 13 to 16 digits\n in length. However, Amazon Comprehend also recognizes credit or debit card numbers\n when only the last four digits are present.

      \n
    • \n
    • \n

      \n PIN\n

      \n

      A four-digit personal identification number (PIN) with which you can access your\n bank account.

      \n
    • \n
    • \n

      \n INTERNATIONAL_BANK_ACCOUNT_NUMBER\n

      \n

      An International Bank Account Number has specific formats in each country. For\n more information, see \n www.iban.com/structure.

      \n
    • \n
    • \n

      \n SWIFT_CODE\n

      \n

      A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a\n particular bank or branch. Banks use these codes for money transfers such as\n international wire transfers.

      \n

      SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to\n specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer\n to the head or primary office.

      \n
    • \n
    \n
  • \n
  • \n

    \n IT\n

    \n
      \n
    • \n

      \n IP_ADDRESS\n

      \n

      An IPv4 address, such as 198.51.100.0.

      \n
    • \n
    • \n

      \n MAC_ADDRESS\n

      \n

      A media access control (MAC) address is a unique identifier\n assigned to a network interface controller (NIC).

      \n
    • \n
    • \n

      \n URL\n

      \n

      A web address, such as www.example.com.

      \n
    • \n
    • \n

      \n AWS_ACCESS_KEY\n

      \n

      A unique identifier that's associated with a secret access key; you use the access\n key ID and secret access key to sign programmatic Amazon Web Services requests\n cryptographically.

      \n
    • \n
    • \n

      \n AWS_SECRET_KEY\n

      \n

      A unique identifier that's associated with an access key. You use the access key\n ID and secret access key to sign programmatic Amazon Web Services requests\n cryptographically.

      \n
    • \n
    \n
  • \n
  • \n

    \n USA specific\n

    \n
      \n
    • \n

      \n US_BANK_ACCOUNT_NUMBER\n

      \n

      A US bank account number, which is typically 10 to 12 digits long.

      \n
    • \n
    • \n

      \n US_BANK_ROUTING_NUMBER\n

      \n

      A US bank account routing number. These are typically nine digits long.

      \n
    • \n
    • \n

      \n US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER\n

      \n

      A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that\n starts with a \"9\" and contains a \"7\" or \"8\" as the fourth digit. An ITIN can be\n formatted with a space or a dash after the third and fourth digits.

      \n
    • \n
    • \n

      \n US_PASSPORT_NUMBER\n

      \n

      A US passport number. Passport numbers range from six to nine alphanumeric\n characters.

      \n
    • \n
    • \n

      \n US_SOCIAL_SECURITY_NUMBER\n

      \n

      A US Social Security Number (SSN) is a nine-digit number that is issued to US\n citizens, permanent residents, and temporary working residents.

      \n
    • \n
    \n
  • \n
  • \n

    \n Canada specific\n

    \n
      \n
    • \n

      \n CA_HEALTH_NUMBER\n

      \n

      A Canadian Health Service Number is a 10-digit unique identifier, required for\n individuals to access healthcare benefits.

      \n
    • \n
    • \n

      \n CA_SOCIAL_INSURANCE_NUMBER\n

      \n

      A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier,\n required for individuals to access government programs and benefits.

      \n

      The SIN is formatted as three groups of three digits, such as \n 123-456-789. A SIN can be validated through a simple check-digit process\n called the Luhn\n algorithm .

      \n
    • \n
    \n
  • \n
  • \n

    \n UK Specific\n

    \n
      \n
    • \n

      \n UK_NATIONAL_HEALTH_SERVICE_NUMBER\n

      \n

      A UK National Health Service Number is a 10-17 digit number, such as 485\n 555 3456. The current system formats the 10-digit number with spaces\n after the third and sixth digits. The final digit is an error-detecting\n checksum.

      \n
    • \n
    • \n

      \n UK_NATIONAL_INSURANCE_NUMBER\n

      \n

      A UK National Insurance Number (NINO) provides individuals with access to National\n Insurance (social security) benefits. It is also used for some purposes in the UK tax\n system.

      \n

      The number is nine digits long and starts with two letters, followed by six\n numbers and one letter. A NINO can be formatted with a space or a dash after the two\n letters and after the second, fourth, and sixth digits.

      \n
    • \n
    • \n

      \n UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER\n

      \n

      A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a\n taxpayer or a business.

      \n
    • \n
    \n
  • \n
  • \n

    \n Custom\n

    \n
      \n
    • \n

      \n Regex filter - You can use regular expressions to\n define patterns for an AI Guardrail to recognize and act upon such as serial number,\n booking ID etc.

      \n
    • \n
    \n
  • \n
", "smithy.api#required": {} } }, @@ -10442,6 +10448,12 @@ "traits": { "smithy.api#documentation": "

The association configurations for overriding behavior on this AI Agent.

" } + }, + "locale": { + "target": "com.amazonaws.qconnect#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The locale that specifies the language and region settings that determine the response\n language for QueryAssistant.

" + } } }, "traits": { diff --git a/models/quicksight.json b/models/quicksight.json index c949cbe8c3..8f68cab4ba 100644 --- a/models/quicksight.json +++ b/models/quicksight.json @@ -9141,6 +9141,12 @@ "traits": { "smithy.api#documentation": "

When you create the dataset, Amazon QuickSight adds the dataset to these folders.

" } + }, + "PerformanceConfiguration": { + "target": "com.amazonaws.quicksight#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.

" + } } }, "traits": { @@ -13418,6 +13424,12 @@ "traits": { "smithy.api#documentation": "

The parameters that are declared in a dataset.

" } + }, + "PerformanceConfiguration": { + "target": "com.amazonaws.quicksight#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The performance optimization configuration of a dataset.

" + } } }, "traits": { @@ -40442,6 +40454,20 @@ } } }, + "com.amazonaws.quicksight#PerformanceConfiguration": { + "type": "structure", + "members": { + "UniqueKeys": { + "target": "com.amazonaws.quicksight#UniqueKeyList", + "traits": { + "smithy.api#documentation": "

A UniqueKey configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.

" + } + }, "com.amazonaws.quicksight#PeriodOverPeriodComputation": { "type": "structure", "members": { @@ -56274,6 +56300,45 @@ "smithy.api#pattern": "^[^\\u0000-\\u00FF]$" } }, + "com.amazonaws.quicksight#UniqueKey": { + "type": "structure", + "members": { + "ColumnNames": { + "target": "com.amazonaws.quicksight#UniqueKeyColumnNameList", + "traits": { + "smithy.api#documentation": "

The name of the column that is referenced in the UniqueKey configuration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A UniqueKey configuration that references a dataset column.

" + } + }, + "com.amazonaws.quicksight#UniqueKeyColumnNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#ColumnName" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.quicksight#UniqueKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#UniqueKey" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, "com.amazonaws.quicksight#UniqueValuesComputation": { "type": "structure", "members": { @@ -58187,6 +58252,12 @@ "traits": { "smithy.api#documentation": "

The parameter declarations of the dataset.

" } + }, + "PerformanceConfiguration": { + "target": "com.amazonaws.quicksight#PerformanceConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.

" + } } }, "traits": { diff --git a/models/rds.json b/models/rds.json index 7906ba4e51..d41295db5b 100644 --- a/models/rds.json +++ b/models/rds.json @@ -2906,13 +2906,13 @@ "EnableLogTypes": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "

The list of log types to enable.

" + "smithy.api#documentation": "

The list of log types to enable.

\n

The following values are valid for each DB engine:

\n
    \n
  • \n

    Aurora MySQL - audit | error | general | slowquery\n

    \n
  • \n
  • \n

    Aurora PostgreSQL - postgresql\n

    \n
  • \n
  • \n

    RDS for MySQL - error | general | slowquery\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql | upgrade\n

    \n
  • \n
" } }, "DisableLogTypes": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "

The list of log types to disable.

" + "smithy.api#documentation": "

The list of log types to disable.

\n

The following values are valid for each DB engine:

\n
    \n
  • \n

    Aurora MySQL - audit | error | general | slowquery\n

    \n
  • \n
  • \n

    Aurora PostgreSQL - postgresql\n

    \n
  • \n
  • \n

    RDS for MySQL - error | general | slowquery\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql | upgrade\n

    \n
  • \n
" } } }, @@ -4462,43 +4462,43 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. \n By default, minor engine upgrades are applied automatically.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. \n By default, minor engine upgrades are applied automatically.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB cluster

" } }, "MonitoringInterval": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off \n collecting Enhanced Monitoring metrics, specify 0.

\n

If MonitoringRoleArn is specified, also set MonitoringInterval\n to a value other than 0.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

\n

Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60\n

\n

Default: 0\n

" + "smithy.api#documentation": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off \n collecting Enhanced Monitoring metrics, specify 0.

\n

If MonitoringRoleArn is specified, also set MonitoringInterval\n to a value other than 0.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

\n

Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60\n

\n

Default: 0\n

" } }, "MonitoringRoleArn": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. \n An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role,\n see Setting \n up and enabling Enhanced Monitoring in the Amazon RDS User Guide.

\n

If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. \n An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role,\n see Setting \n up and enabling Enhanced Monitoring in the Amazon RDS User Guide.

\n

If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" } }, "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the cluster.

" + "smithy.api#documentation": "

The mode of Database Insights to enable for the DB cluster.

\n

If you set this value to advanced, you must also set the PerformanceInsightsEnabled\n parameter to true and the PerformanceInsightsRetentionPeriod parameter to 465.

\n

Valid for Cluster Type: Aurora DB clusters only

" } }, "EnablePerformanceInsights": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether to turn on Performance Insights for the DB cluster.

\n

For more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

Specifies whether to turn on Performance Insights for the DB cluster.

\n

For more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" } }, "PerformanceInsightsKMSKeyId": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS \n uses your default KMS key. There is a default KMS key for your Amazon Web Services account. \n Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS \n uses your default KMS key. There is a default KMS key for your Amazon Web Services account. \n Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" } }, "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

\n

If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

" + "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

\n

If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

" } }, "EnableLimitlessDatabase": { @@ -5232,7 +5232,7 @@ "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the instance.

" + "smithy.api#documentation": "

The mode of Database Insights to enable for the DB instance.

\n

This setting only applies to Amazon Aurora DB instances.

\n \n

Currently, this value is inherited from the DB cluster and can't be changed.

\n
" } }, "EnablePerformanceInsights": { @@ -5591,7 +5591,7 @@ "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights.

" + "smithy.api#documentation": "

The mode of Database Insights to enable for the read replica.

\n \n

Currently, this setting is not supported.

\n
" } }, "EnablePerformanceInsights": { @@ -5711,7 +5711,7 @@ "AllocatedStorage": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the read replica.\n Follow the allocation rules specified in CreateDBInstance.

\n \n

Be sure to allocate enough storage for your read replica so that the create operation can succeed.\n You can also allocate additional storage for future growth.

\n
" + "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the read replica.\n Follow the allocation rules specified in CreateDBInstance.

\n

This setting isn't valid for RDS for SQL Server.

\n \n

Be sure to allocate enough storage for your read replica so that the create operation can succeed.\n You can also allocate additional storage for future growth.

\n
" } }, "SourceDBClusterIdentifier": { @@ -7528,43 +7528,43 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.rds#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether minor version patches are applied automatically.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "smithy.api#documentation": "

Indicates whether minor version patches are applied automatically.

\n

This setting is for Aurora DB clusters and Multi-AZ DB clusters.

" } }, "MonitoringInterval": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "smithy.api#documentation": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster.

\n

This setting is only for Aurora DB clusters and Multi-AZ DB clusters.

" } }, "MonitoringRoleArn": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "smithy.api#documentation": "

The ARN for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs.

\n

This setting is only for Aurora DB clusters and Multi-AZ DB clusters.

" } }, "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

The mode of Database Insights that is enabled for the cluster.

" + "smithy.api#documentation": "

The mode of Database Insights that is enabled for the DB cluster.

" } }, "PerformanceInsightsEnabled": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Indicates whether Performance Insights is enabled for the DB cluster.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "smithy.api#documentation": "

Indicates whether Performance Insights is enabled for the DB cluster.

\n

This setting is only for Aurora DB clusters and Multi-AZ DB clusters.

" } }, "PerformanceInsightsKMSKeyId": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

" + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

This setting is only for Aurora DB clusters and Multi-AZ DB clusters.

" } }, "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

" + "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

This setting is only for Aurora DB clusters and Multi-AZ DB clusters.

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

" } }, "ServerlessV2ScalingConfiguration": { @@ -14071,13 +14071,13 @@ "Source": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

A specific source to return parameters for.

\n

Valid Values:

\n
    \n
  • \n

    \n customer\n

    \n
  • \n
  • \n

    \n engine\n

    \n
  • \n
  • \n

    \n service\n

    \n
  • \n
" + "smithy.api#documentation": "

A specific source to return parameters for.

\n

Valid Values:

\n
    \n
  • \n

    \n engine-default\n

    \n
  • \n
  • \n

    \n system\n

    \n
  • \n
  • \n

    \n user\n

    \n
  • \n
" } }, "Filters": { "target": "com.amazonaws.rds#FilterList", "traits": { - "smithy.api#documentation": "

This parameter isn't currently supported.

" + "smithy.api#documentation": "

A filter that specifies one or more DB cluster parameters to describe.

\n

The only supported filter is parameter-name. The results list only includes information about the DB cluster parameters with these names.

" } }, "MaxRecords": { @@ -15537,7 +15537,7 @@ "Filters": { "target": "com.amazonaws.rds#FilterList", "traits": { - "smithy.api#documentation": "

This parameter isn't currently supported.

" + "smithy.api#documentation": "

A filter that specifies one or more DB parameters to describe.

\n

The only supported filter is parameter-name. The results list only includes information about the DB parameters with these names.

" } }, "MaxRecords": { @@ -16735,7 +16735,7 @@ "Filters": { "target": "com.amazonaws.rds#FilterList", "traits": { - "smithy.api#documentation": "

This parameter isn't currently supported.

" + "smithy.api#documentation": "

A filter that specifies one or more parameters to describe.

\n

The only supported filter is parameter-name. The results list only includes information about the parameters with these names.

" } }, "MaxRecords": { @@ -21871,7 +21871,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. \n By default, minor engine upgrades are applied automatically.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. \n By default, minor engine upgrades are applied automatically.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" } }, "MonitoringInterval": { @@ -21889,7 +21889,7 @@ "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the cluster.

" + "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the DB cluster.

\n

If you change the value from standard to advanced, you must set the \n PerformanceInsightsEnabled parameter to true and the \n PerformanceInsightsRetentionPeriod parameter to 465.

\n

If you change the value from advanced to standard, you must \n set the PerformanceInsightsEnabled parameter to false.

\n

Valid for Cluster Type: Aurora DB clusters only

" } }, "EnablePerformanceInsights": { @@ -21901,13 +21901,13 @@ "PerformanceInsightsKMSKeyId": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS \n uses your default KMS key. There is a default KMS key for your Amazon Web Services account. \n Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

" + "smithy.api#documentation": "

The Amazon Web Services KMS key identifier for encryption of Performance Insights data.

\n

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

\n

If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS \n uses your default KMS key. There is a default KMS key for your Amazon Web Services account. \n Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" } }, "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

Valid for Cluster Type: Multi-AZ DB clusters only

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

\n

If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

" + "smithy.api#documentation": "

The number of days to retain Performance Insights data.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

\n

Valid Values:

\n
    \n
  • \n

    \n 7\n

    \n
  • \n
  • \n

    \n month * 31, where month is a number of months from 1-23. \n Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

    \n
  • \n
  • \n

    \n 731\n

    \n
  • \n
\n

Default: 7 days

\n

If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

" } }, "ServerlessV2ScalingConfiguration": { @@ -22514,7 +22514,7 @@ "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the instance.

" + "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the DB instance.

\n

This setting only applies to Amazon Aurora DB instances.

\n \n

Currently, this value is inherited from the DB cluster and can't be changed.

\n
" } }, "EnablePerformanceInsights": { @@ -22538,7 +22538,7 @@ "CloudwatchLogsExportConfiguration": { "target": "com.amazonaws.rds#CloudwatchLogsExportConfiguration", "traits": { - "smithy.api#documentation": "

The log types to be enabled for export to CloudWatch Logs for a \n specific DB instance.

\n

A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance \n immediately. Therefore, the ApplyImmediately parameter has no effect.

\n

This setting doesn't apply to RDS Custom DB instances.

" + "smithy.api#documentation": "

The log types to be enabled for export to CloudWatch Logs for a \n specific DB instance.

\n

A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance \n immediately. Therefore, the ApplyImmediately parameter has no effect.

\n

This setting doesn't apply to RDS Custom DB instances.

\n

The following values are valid for each DB engine:

\n
    \n
  • \n

    Aurora MySQL - audit | error | general | slowquery\n

    \n
  • \n
  • \n

    Aurora PostgreSQL - postgresql\n

    \n
  • \n
  • \n

    RDS for MySQL - error | general | slowquery\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql | upgrade\n

    \n
  • \n
\n

For more information about exporting CloudWatch Logs for Amazon RDS, see \n Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

\n

For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

" } }, "ProcessorFeatures": { @@ -28289,7 +28289,7 @@ "AllocatedStorage": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in\n CreateDBInstance.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also\n allocate additional storage for future growth.

\n
" + "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in\n CreateDBInstance.

\n

This setting isn't valid for RDS for SQL Server.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also\n allocate additional storage for future growth.

\n
" } }, "DedicatedLogVolume": { @@ -28418,7 +28418,7 @@ "AllocatedStorage": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance.\n Follow the allocation rules specified in CreateDBInstance.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed.\n You can also allocate additional storage for future growth.

\n
" + "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance.\n Follow the allocation rules specified in CreateDBInstance.

\n

This setting isn't valid for RDS for SQL Server.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed.\n You can also allocate additional storage for future growth.

\n
" } }, "DBInstanceClass": { @@ -28634,7 +28634,7 @@ "DatabaseInsightsMode": { "target": "com.amazonaws.rds#DatabaseInsightsMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the instance.

" + "smithy.api#documentation": "

Specifies the mode of Database Insights to enable for the DB instance.

\n

This setting only applies to Amazon Aurora DB instances.

\n \n

Currently, this value is inherited from the DB cluster and can't be changed.

\n
" } }, "EnablePerformanceInsights": { @@ -29179,7 +29179,7 @@ "AllocatedStorage": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance.\n Follow the allocation rules specified in CreateDBInstance.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed.\n You can also allocate additional storage for future growth.

\n
" + "smithy.api#documentation": "

The amount of storage (in gibibytes) to allocate initially for the DB instance.\n Follow the allocation rules specified in CreateDBInstance.

\n

This setting isn't valid for RDS for SQL Server.

\n \n

Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed.\n You can also allocate additional storage for future growth.

\n
" } }, "DedicatedLogVolume": { diff --git a/models/resiliencehub.json b/models/resiliencehub.json index e2fc7be896..18f132ed97 100644 --- a/models/resiliencehub.json +++ b/models/resiliencehub.json @@ -225,6 +225,26 @@ } } }, + "com.amazonaws.resiliencehub#Alarm": { + "type": "structure", + "members": { + "alarmArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Amazon CloudWatch alarm.

" + } + }, + "source": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Indicates the source of the Amazon CloudWatch alarm. That is, it indicates if the\n alarm was created using Resilience Hub recommendation (AwsResilienceHub),\n or if you had created the alarm in Amazon CloudWatch (Customer).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates the Amazon CloudWatch alarm detected while running an assessment.

" + } + }, "com.amazonaws.resiliencehub#AlarmRecommendation": { "type": "structure", "members": { @@ -683,7 +703,7 @@ "complianceStatus": { "target": "com.amazonaws.resiliencehub#ComplianceStatus", "traits": { - "smithy.api#documentation": "

Current\n status of compliance for the resiliency policy.

" + "smithy.api#documentation": "

Current status of compliance for the resiliency policy.

" } }, "cost": { @@ -1146,7 +1166,7 @@ "appComponents": { "target": "com.amazonaws.resiliencehub#AppComponentNameList", "traits": { - "smithy.api#documentation": "

Indicates the Application Components (AppComponents) that were assessed as part of the\n assessnent and are associated with the identified risk and recommendation.

\n \n

This property is available only in the US East (N. Virginia) Region.

\n
" + "smithy.api#documentation": "

Indicates the Application Components (AppComponents) that were assessed as part of the\n assessment and are associated with the identified risk and recommendation.

\n \n

This property is available only in the US East (N. Virginia) Region.

\n
" } } }, @@ -2465,6 +2485,12 @@ "smithy.api#required": {} } }, + "appComponentId": { + "target": "com.amazonaws.resiliencehub#EntityName255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of an AppComponent.

" + } + }, "excludeReason": { "target": "com.amazonaws.resiliencehub#ExcludeRecommendationReason", "traits": { @@ -2549,7 +2575,7 @@ "diffType": { "target": "com.amazonaws.resiliencehub#DifferenceType", "traits": { - "smithy.api#documentation": "

Difference type between actual and expected recovery point objective (RPO) and recovery\n time objective (RTO) values. Currently, Resilience Hub supports only\n NotEqual difference type.

" + "smithy.api#documentation": "

Difference type between actual and expected recovery point objective (RPO) and recovery\n time objective (RTO) values. Currently, Resilience Hub supports only\n NotEqual difference type.

" } } }, @@ -5422,6 +5448,26 @@ } } }, + "com.amazonaws.resiliencehub#Experiment": { + "type": "structure", + "members": { + "experimentArn": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the FIS experiment.

" + } + }, + "experimentTemplateId": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Identifier of the FIS experiment template.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates the FIS experiment detected while running an assessment.

" + } + }, "com.amazonaws.resiliencehub#FailedGroupingRecommendationEntries": { "type": "list", "member": { @@ -7987,7 +8033,7 @@ "invokerRoleName": { "target": "com.amazonaws.resiliencehub#IamRoleName", "traits": { - "smithy.api#documentation": "

Existing Amazon Web Services\n IAM role name in the primary Amazon Web Services account that will be assumed by\n Resilience Hub Service Principle to obtain a read-only access to your application\n resources while running an assessment.

\n \n
    \n
  • \n

    You must have iam:passRole permission for this role while creating or\n updating the application.

    \n
  • \n
  • \n

    Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-]\n characters.

    \n
  • \n
\n
" + "smithy.api#documentation": "

Existing Amazon Web Services\n IAM role name in the primary Amazon Web Services account that will be assumed by\n Resilience Hub Service Principle to obtain a read-only access to your application\n resources while running an assessment.

\n

If your IAM role includes a path, you must include the path in the invokerRoleName parameter. \n For example, if your IAM role's ARN is arn:aws:iam:123456789012:role/my-path/role-name, you should pass my-path/role-name.\n

\n \n
    \n
  • \n

    You must have iam:passRole permission for this role while creating or\n updating the application.

    \n
  • \n
  • \n

    Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-]\n characters.

    \n
  • \n
\n
" } }, "crossAccountRoleArns": { @@ -8427,6 +8473,18 @@ "traits": { "smithy.api#documentation": "

Indicates the reason for excluding an operational recommendation.

" } + }, + "latestDiscoveredExperiment": { + "target": "com.amazonaws.resiliencehub#Experiment", + "traits": { + "smithy.api#documentation": "

Indicates the experiment created in FIS that was discovered by Resilience Hub, which matches the recommendation.

" + } + }, + "discoveredAlarm": { + "target": "com.amazonaws.resiliencehub#Alarm", + "traits": { + "smithy.api#documentation": "

Indicates the previously implemented Amazon CloudWatch alarm discovered by Resilience Hub.

" + } } }, "traits": { @@ -9205,7 +9263,7 @@ "hasMoreErrors": { "target": "com.amazonaws.resiliencehub#BooleanOptional", "traits": { - "smithy.api#documentation": "

This indicates if there are more errors not listed in the\n resourceErrors\n list.

" + "smithy.api#documentation": "

This indicates if there are more errors not listed in the resourceErrors\n list.

" } } }, @@ -10204,6 +10262,12 @@ "smithy.api#required": {} } }, + "appComponentId": { + "target": "com.amazonaws.resiliencehub#EntityName255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of the AppComponent.

" + } + }, "appComponentName": { "target": "com.amazonaws.resiliencehub#EntityId", "traits": { @@ -10924,6 +10988,12 @@ "smithy.api#required": {} } }, + "appComponentId": { + "target": "com.amazonaws.resiliencehub#EntityName255", + "traits": { + "smithy.api#documentation": "

Indicates the identifier of the AppComponent.

" + } + }, "excludeReason": { "target": "com.amazonaws.resiliencehub#ExcludeRecommendationReason", "traits": { diff --git a/models/sagemaker.json b/models/sagemaker.json index 81d8031897..2f9964da63 100644 --- a/models/sagemaker.json +++ b/models/sagemaker.json @@ -701,7 +701,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the training algorithm to use in a CreateTrainingJob request.

\n

For more information about algorithms provided by SageMaker, see Algorithms. For\n information about using your own algorithms, see Using Your Own Algorithms with\n Amazon SageMaker.

" + "smithy.api#documentation": "

Specifies the training algorithm to use in a CreateTrainingJob request.

\n \n

SageMaker uses its own SageMaker account credentials to pull and access built-in algorithms\n so built-in algorithms are universally accessible across all Amazon Web Services accounts. As a\n result, built-in algorithms have standard, unrestricted access. You cannot restrict\n built-in algorithms using IAM roles. Use custom algorithms if you require specific\n access controls.

\n
\n

For more information about algorithms provided by SageMaker, see Algorithms. For\n information about using your own algorithms, see Using Your Own Algorithms with\n Amazon SageMaker.

" } }, "com.amazonaws.sagemaker#AlgorithmStatus": { @@ -1009,7 +1009,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details about an Amazon SageMaker app.

" + "smithy.api#documentation": "

Details about an Amazon SageMaker AI app.

" } }, "com.amazonaws.sagemaker#AppImageConfigArn": { @@ -1052,7 +1052,7 @@ "KernelGatewayImageConfig": { "target": "com.amazonaws.sagemaker#KernelGatewayImageConfig", "traits": { - "smithy.api#documentation": "

The configuration for the file system and kernels in the SageMaker image.

" + "smithy.api#documentation": "

The configuration for the file system and kernels in the SageMaker AI image.

" } }, "JupyterLabAppImageConfig": { @@ -1069,7 +1069,7 @@ } }, "traits": { - "smithy.api#documentation": "

The configuration for running a SageMaker image as a KernelGateway app.

" + "smithy.api#documentation": "

The configuration for running a SageMaker AI image as a KernelGateway app.

" } }, "com.amazonaws.sagemaker#AppImageConfigList": { @@ -4448,7 +4448,7 @@ "target": "com.amazonaws.sagemaker#AutoMLS3DataType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The data type.

\n
    \n
  • \n

    If you choose S3Prefix, S3Uri identifies a key name\n prefix. SageMaker uses all objects that match the specified key name prefix\n for model training.

    \n

    The S3Prefix should have the following format:

    \n

    \n s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE\n

    \n
  • \n
  • \n

    If you choose ManifestFile, S3Uri identifies an object\n that is a manifest file containing a list of object keys that you want SageMaker to use for model training.

    \n

    A ManifestFile should have the format shown below:

    \n

    \n [ {\"prefix\":\n \"s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/\"}, \n

    \n

    \n \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1\",\n

    \n

    \n \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2\",\n

    \n

    \n ... \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N\" ]\n

    \n
  • \n
  • \n

    If you choose AugmentedManifestFile, S3Uri identifies an\n object that is an augmented manifest file in JSON lines format. This file contains\n the data you want to use for model training. AugmentedManifestFile is\n available for V2 API jobs only (for example, for jobs created by calling\n CreateAutoMLJobV2).

    \n

    Here is a minimal, single-record example of an\n AugmentedManifestFile:

    \n

    \n {\"source-ref\":\n \"s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/cats/cat.jpg\",\n

    \n

    \n \"label-metadata\": {\"class-name\": \"cat\" }

    \n

    For more information on AugmentedManifestFile, see Provide\n Dataset Metadata to Training Jobs with an Augmented Manifest File.

    \n
  • \n
", + "smithy.api#documentation": "

The data type.

\n
    \n
  • \n

    If you choose S3Prefix, S3Uri identifies a key name\n prefix. SageMaker AI uses all objects that match the specified key name prefix\n for model training.

    \n

    The S3Prefix should have the following format:

    \n

    \n s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE\n

    \n
  • \n
  • \n

    If you choose ManifestFile, S3Uri identifies an object\n that is a manifest file containing a list of object keys that you want SageMaker AI to use for model training.

    \n

    A ManifestFile should have the format shown below:

    \n

    \n [ {\"prefix\":\n \"s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/\"}, \n

    \n

    \n \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1\",\n

    \n

    \n \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2\",\n

    \n

    \n ... \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N\" ]\n

    \n
  • \n
  • \n

    If you choose AugmentedManifestFile, S3Uri identifies an\n object that is an augmented manifest file in JSON lines format. This file contains\n the data you want to use for model training. AugmentedManifestFile is\n available for V2 API jobs only (for example, for jobs created by calling\n CreateAutoMLJobV2).

    \n

    Here is a minimal, single-record example of an\n AugmentedManifestFile:

    \n

    \n {\"source-ref\":\n \"s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/cats/cat.jpg\",\n

    \n

    \n \"label-metadata\": {\"class-name\": \"cat\" }

    \n

    For more information on AugmentedManifestFile, see Provide\n Dataset Metadata to Training Jobs with an Augmented Manifest File.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -5601,18 +5601,18 @@ "CsvContentTypes": { "target": "com.amazonaws.sagemaker#CsvContentTypes", "traits": { - "smithy.api#documentation": "

The list of all content type headers that Amazon SageMaker will treat as CSV and\n capture accordingly.

" + "smithy.api#documentation": "

The list of all content type headers that Amazon SageMaker AI will treat as CSV and\n capture accordingly.

" } }, "JsonContentTypes": { "target": "com.amazonaws.sagemaker#JsonContentTypes", "traits": { - "smithy.api#documentation": "

The list of all content type headers that SageMaker will treat as JSON and\n capture accordingly.

" + "smithy.api#documentation": "

The list of all content type headers that SageMaker AI will treat as JSON and\n capture accordingly.

" } } }, "traits": { - "smithy.api#documentation": "

Configuration specifying how to treat different headers. If no headers are specified\n Amazon SageMaker will by default base64 encode when capturing the data.

" + "smithy.api#documentation": "

Configuration specifying how to treat different headers. If no headers are specified\n Amazon SageMaker AI will by default base64 encode when capturing the data.

" } }, "com.amazonaws.sagemaker#CaptureMode": { @@ -6873,7 +6873,8 @@ "type": "integer", "traits": { "smithy.api#range": { - "min": 0 + "min": 0, + "max": 6758 } } }, @@ -7528,6 +7529,168 @@ "traits": { "smithy.api#enumValue": "ml.trn2.48xlarge" } + }, + "ML_C6I_LARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.large" + } + }, + "ML_C6I_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.xlarge" + } + }, + "ML_C6I_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.2xlarge" + } + }, + "ML_C6I_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.4xlarge" + } + }, + "ML_C6I_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.8xlarge" + } + }, + "ML_C6I_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.12xlarge" + } + }, + "ML_C6I_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.16xlarge" + } + }, + "ML_C6I_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.24xlarge" + } + }, + "ML_C6I_32XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.c6i.32xlarge" + } + }, + "ML_M6I_LARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.large" + } + }, + "ML_M6I_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.xlarge" + } + }, + "ML_M6I_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.2xlarge" + } + }, + "ML_M6I_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.4xlarge" + } + }, + "ML_M6I_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.8xlarge" + } + }, + "ML_M6I_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + 
"smithy.api#enumValue": "ml.m6i.12xlarge" + } + }, + "ML_M6I_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.16xlarge" + } + }, + "ML_M6I_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.24xlarge" + } + }, + "ML_M6I_32XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.m6i.32xlarge" + } + }, + "ML_R6I_LARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.large" + } + }, + "ML_R6I_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.xlarge" + } + }, + "ML_R6I_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.2xlarge" + } + }, + "ML_R6I_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.4xlarge" + } + }, + "ML_R6I_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.8xlarge" + } + }, + "ML_R6I_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.12xlarge" + } + }, + "ML_R6I_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.16xlarge" + } + }, + "ML_R6I_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.24xlarge" + } + }, + "ML_R6I_32XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.r6i.32xlarge" + } } } }, @@ -8110,7 +8273,7 @@ } }, "traits": { - "smithy.api#documentation": "

A Git repository that SageMaker automatically displays to users for cloning in the\n JupyterServer application.

" + "smithy.api#documentation": "

A Git repository that SageMaker AI automatically displays to users for cloning in the\n JupyterServer application.

" } }, "com.amazonaws.sagemaker#CodeRepositoryArn": { @@ -9523,7 +9686,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a running app for the specified UserProfile. This operation is automatically\n invoked by Amazon SageMaker upon access to the associated Domain, and when new kernel\n configurations are selected by the user. A user may have multiple Apps active\n simultaneously.

" + "smithy.api#documentation": "

Creates a running app for the specified UserProfile. This operation is automatically\n invoked by Amazon SageMaker AI upon access to the associated Domain, and when new kernel\n configurations are selected by the user. A user may have multiple Apps active\n simultaneously.

" } }, "com.amazonaws.sagemaker#CreateAppImageConfig": { @@ -9540,7 +9703,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a configuration for running a SageMaker image as a KernelGateway app. The\n configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the\n kernels in the image.

" + "smithy.api#documentation": "

Creates a configuration for running a SageMaker AI image as a KernelGateway app. The\n configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the\n kernels in the image.

" } }, "com.amazonaws.sagemaker#CreateAppImageConfigRequest": { @@ -9645,7 +9808,7 @@ "ResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker image\n created on the instance.

\n \n

The value of InstanceType passed as part of the ResourceSpec\n in the CreateApp call overrides the value passed as part of the\n ResourceSpec configured for the user profile or the domain. If\n InstanceType is not specified in any of those three ResourceSpec\n values for a KernelGateway app, the CreateApp call fails with a\n request validation error.

\n
" + "smithy.api#documentation": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker AI image\n created on the instance.

\n \n

The value of InstanceType passed as part of the ResourceSpec\n in the CreateApp call overrides the value passed as part of the\n ResourceSpec configured for the user profile or the domain. If\n InstanceType is not specified in any of those three ResourceSpec\n values for a KernelGateway app, the CreateApp call fails with a\n request validation error.

\n
" } } }, @@ -9760,7 +9923,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

\n

An AutoML job in SageMaker is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.

\n

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker developer guide.

\n \n

We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

" + "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.

\n

An AutoML job in SageMaker AI is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker AI then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.

\n

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker AI developer guide.

\n \n

We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.

" } }, "com.amazonaws.sagemaker#CreateAutoMLJobRequest": { @@ -9872,7 +10035,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

\n

An AutoML job in SageMaker is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.

\n

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker developer guide.

\n

AutoML jobs V2 support various problem types such as regression, binary, and multiclass\n classification with tabular data, text and image classification, time-series forecasting,\n and fine-tuning of large language models (LLMs) for text generation.

\n \n

\n CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob\n and DescribeAutoMLJob which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

For the list of available problem types supported by CreateAutoMLJobV2, see\n AutoMLProblemTypeConfig.

\n

You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

" + "smithy.api#documentation": "

Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.

\n

An AutoML job in SageMaker AI is a fully automated process that allows you to build machine\n learning models with minimal effort and machine learning expertise. When initiating an\n AutoML job, you provide your data and optionally specify parameters tailored to your use\n case. SageMaker AI then automates the entire model development lifecycle, including data\n preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify\n and accelerate the model building process by automating various tasks and exploring\n different combinations of machine learning algorithms, data preprocessing techniques, and\n hyperparameter values. The output of an AutoML job comprises one or more trained models\n ready for deployment and inference. Additionally, SageMaker AI AutoML jobs generate a candidate\n model leaderboard, allowing you to select the best-performing model for deployment.

\n

For more information about AutoML jobs, see https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html\n in the SageMaker AI developer guide.

\n

AutoML jobs V2 support various problem types such as regression, binary, and multiclass\n classification with tabular data, text and image classification, time-series forecasting,\n and fine-tuning of large language models (LLMs) for text generation.

\n \n

\n CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob\n and DescribeAutoMLJob which offer backward compatibility.

\n

\n CreateAutoMLJobV2 can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob, as well as time-series forecasting,\n non-tabular problem types such as image or text classification, and text generation\n (LLMs fine-tuning).

\n

Find guidelines about how to migrate a CreateAutoMLJob to\n CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.

\n
\n

For the list of available problem types supported by CreateAutoMLJobV2, see\n AutoMLProblemTypeConfig.

\n

You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.

" } }, "com.amazonaws.sagemaker#CreateAutoMLJobV2Request": { @@ -10153,7 +10316,7 @@ "target": "com.amazonaws.sagemaker#CreateCodeRepositoryOutput" }, "traits": { - "smithy.api#documentation": "

Creates a Git repository as a resource in your SageMaker account. You can\n associate the repository with notebook instances so that you can use Git source control\n for the notebooks you create. The Git repository is a resource in your SageMaker\n account, so it can be associated with more than one notebook instance, and it persists\n independently from the lifecycle of any notebook instances it is associated with.

\n

The repository can be hosted either in Amazon Web Services CodeCommit\n or in any other Git repository.

" + "smithy.api#documentation": "

Creates a Git repository as a resource in your SageMaker AI account. You can\n associate the repository with notebook instances so that you can use Git source control\n for the notebooks you create. The Git repository is a resource in your SageMaker AI\n account, so it can be associated with more than one notebook instance, and it persists\n independently from the lifecycle of any notebook instances it is associated with.

\n

The repository can be hosted either in Amazon Web Services CodeCommit\n or in any other Git repository.

" } }, "com.amazonaws.sagemaker#CreateCodeRepositoryInput": { @@ -10219,7 +10382,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the\n resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

\n

If\n you choose to host your model using Amazon SageMaker hosting services, you can use the resulting\n model artifacts as part of the model. You can also use the artifacts with\n Amazon Web Services IoT Greengrass. In that case, deploy them as an ML\n resource.

\n

In the request body, you provide the following:

\n
    \n
  • \n

    A name for the compilation job

    \n
  • \n
  • \n

    Information about the input model artifacts

    \n
  • \n
  • \n

    The output location for the compiled model and the device (target) that the\n model runs on

    \n
  • \n
  • \n

    The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform\n the model compilation job.

    \n
  • \n
\n

You can also provide a Tag to track the model compilation job's resource\n use and costs. The response body contains the\n CompilationJobArn\n for the compiled job.

\n

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation\n job, use DescribeCompilationJob. To get information about multiple model compilation\n jobs, use ListCompilationJobs.

" + "smithy.api#documentation": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker AI saves the\n resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

\n

If\n you choose to host your model using Amazon SageMaker AI hosting services, you can use the resulting\n model artifacts as part of the model. You can also use the artifacts with\n Amazon Web Services IoT Greengrass. In that case, deploy them as an ML\n resource.

\n

In the request body, you provide the following:

\n
    \n
  • \n

    A name for the compilation job

    \n
  • \n
  • \n

    Information about the input model artifacts

    \n
  • \n
  • \n

    The output location for the compiled model and the device (target) that the\n model runs on

    \n
  • \n
  • \n

    The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker AI assumes to perform\n the model compilation job.

    \n
  • \n
\n

You can also provide a Tag to track the model compilation job's resource\n use and costs. The response body contains the\n CompilationJobArn\n for the compiled job.

\n

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation\n job, use DescribeCompilationJob. To get information about multiple model compilation\n jobs, use ListCompilationJobs.

" } }, "com.amazonaws.sagemaker#CreateCompilationJobRequest": { @@ -10237,7 +10400,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on\n your behalf.

\n

During model compilation, Amazon SageMaker needs your permission to:

\n
    \n
  • \n

    Read input data from an S3 bucket

    \n
  • \n
  • \n

    Write model artifacts to an S3 bucket

    \n
  • \n
  • \n

    Write logs to Amazon CloudWatch Logs

    \n
  • \n
  • \n

    Publish metrics to Amazon CloudWatch

    \n
  • \n
\n

You grant permissions for all of these tasks to an IAM role. To pass this role to\n Amazon SageMaker, the caller of this API must have the iam:PassRole permission. For\n more information, see Amazon SageMaker\n Roles.\n

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on\n your behalf.

\n

During model compilation, Amazon SageMaker AI needs your permission to:

\n
    \n
  • \n

    Read input data from an S3 bucket

    \n
  • \n
  • \n

    Write model artifacts to an S3 bucket

    \n
  • \n
  • \n

    Write logs to Amazon CloudWatch Logs

    \n
  • \n
  • \n

    Publish metrics to Amazon CloudWatch

    \n
  • \n
\n

You grant permissions for all of these tasks to an IAM role. To pass this role to\n Amazon SageMaker AI, the caller of this API must have the iam:PassRole permission. For\n more information, see Amazon SageMaker AI\n Roles.\n

", "smithy.api#required": {} } }, @@ -10271,7 +10434,7 @@ "target": "com.amazonaws.sagemaker#StoppingCondition", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Specifies a limit to how long a model compilation job can run. When the job reaches\n the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training\n costs.

", + "smithy.api#documentation": "

Specifies a limit to how long a model compilation job can run. When the job reaches\n the time limit, Amazon SageMaker AI ends the compilation job. Use this API to cap model training\n costs.

", "smithy.api#required": {} } }, @@ -10293,7 +10456,7 @@ "target": "com.amazonaws.sagemaker#CompilationJobArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

If the action is successful, the service sends back an HTTP 200 response. Amazon SageMaker returns\n the following data in JSON format:

\n
    \n
  • \n

    \n CompilationJobArn: The Amazon Resource Name (ARN) of the compiled\n job.

    \n
  • \n
", + "smithy.api#documentation": "

If the action is successful, the service sends back an HTTP 200 response. Amazon SageMaker AI returns\n the following data in JSON format:

\n
    \n
  • \n

    \n CompilationJobArn: The Amazon Resource Name (ARN) of the compiled\n job.

    \n
  • \n
", "smithy.api#required": {} } } @@ -10502,7 +10665,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a definition for a job that monitors data quality and drift. For information\n about model monitor, see Amazon SageMaker Model\n Monitor.

" + "smithy.api#documentation": "

Creates a definition for a job that monitors data quality and drift. For information\n about model monitor, see Amazon SageMaker AI Model\n Monitor.

" } }, "com.amazonaws.sagemaker#CreateDataQualityJobDefinitionRequest": { @@ -10562,7 +10725,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can \n assume to perform tasks on your behalf.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can \n assume to perform tasks on your behalf.

", "smithy.api#required": {} } }, @@ -10681,7 +10844,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a Domain. A domain consists of an associated Amazon Elastic File System\n volume, a list of authorized users, and a variety of security, application, policy, and\n Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files\n and other artifacts with each other.

\n

\n EFS storage\n

\n

When a domain is created, an EFS volume is created for use by all of the users within the\n domain. Each user receives a private home directory within the EFS volume for notebooks, Git\n repositories, and data files.

\n

SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services\n KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key\n by default. For more control, you can specify a customer managed key. For more information,\n see Protect Data\n at Rest Using Encryption.

\n

\n VPC configuration\n

\n

All traffic between the domain and the Amazon EFS volume is through the specified\n VPC and subnets. For other traffic, you can specify the AppNetworkAccessType\n parameter. AppNetworkAccessType corresponds to the network access type that you\n choose when you onboard to the domain. The following options are available:

\n
    \n
  • \n

    \n PublicInternetOnly - Non-EFS traffic goes through a VPC managed by\n Amazon SageMaker, which allows internet access. This is the default value.

    \n
  • \n
  • \n

    \n VpcOnly - All traffic is through the specified VPC and subnets. Internet\n access is disabled by default. To allow internet access, you must specify a NAT\n gateway.

    \n

    When internet access is disabled, you won't be able to run a Amazon SageMaker\n Studio notebook or to train or host models unless your VPC has an interface endpoint to\n the SageMaker API and runtime or a NAT gateway and your security groups allow\n outbound connections.

    \n
  • \n
\n \n

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules\n in order to launch a Amazon SageMaker Studio app successfully.

\n
\n

For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC.

" + "smithy.api#documentation": "

Creates a Domain. A domain consists of an associated Amazon Elastic File System\n volume, a list of authorized users, and a variety of security, application, policy, and\n Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files\n and other artifacts with each other.

\n

\n EFS storage\n

\n

When a domain is created, an EFS volume is created for use by all of the users within the\n domain. Each user receives a private home directory within the EFS volume for notebooks, Git\n repositories, and data files.

\n

SageMaker AI uses the Amazon Web Services Key Management Service (Amazon Web Services\n KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key\n by default. For more control, you can specify a customer managed key. For more information,\n see Protect Data\n at Rest Using Encryption.

\n

\n VPC configuration\n

\n

All traffic between the domain and the Amazon EFS volume is through the specified\n VPC and subnets. For other traffic, you can specify the AppNetworkAccessType\n parameter. AppNetworkAccessType corresponds to the network access type that you\n choose when you onboard to the domain. The following options are available:

\n
    \n
  • \n

    \n PublicInternetOnly - Non-EFS traffic goes through a VPC managed by\n Amazon SageMaker AI, which allows internet access. This is the default value.

    \n
  • \n
  • \n

    \n VpcOnly - All traffic is through the specified VPC and subnets. Internet\n access is disabled by default. To allow internet access, you must specify a NAT\n gateway.

    \n

    When internet access is disabled, you won't be able to run a Amazon SageMaker AI\n Studio notebook or to train or host models unless your VPC has an interface endpoint to\n the SageMaker AI API and runtime or a NAT gateway and your security groups allow\n outbound connections.

    \n
  • \n
\n \n

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules\n in order to launch a Amazon SageMaker AI Studio app successfully.

\n
\n

For more information, see Connect Amazon SageMaker AI Studio Notebooks to Resources in a VPC.

" } }, "com.amazonaws.sagemaker#CreateDomainRequest": { @@ -10742,7 +10905,7 @@ "AppNetworkAccessType": { "target": "com.amazonaws.sagemaker#AppNetworkAccessType", "traits": { - "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. The default value is\n PublicInternetOnly.

\n
    \n
  • \n

    \n PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access

    \n
  • \n
  • \n

    \n VpcOnly - All traffic is through the specified VPC and subnets

    \n
  • \n
" + "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. The default value is\n PublicInternetOnly.

\n
    \n
  • \n

    \n PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access

    \n
  • \n
  • \n

    \n VpcOnly - All traffic is through the specified VPC and subnets

    \n
  • \n
" } }, "HomeEfsFileSystemKmsKeyId": { @@ -10757,7 +10920,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

SageMaker uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to\n the domain with an Amazon Web Services managed key by default. For more control, specify a\n customer managed key.

" + "smithy.api#documentation": "

SageMaker AI uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to\n the domain with an Amazon Web Services managed key by default. For more control, specify a\n customer managed key.

" } }, "AppSecurityGroupManagement": { @@ -11095,7 +11258,7 @@ "ExecutionRoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform actions on your behalf. For more information, see SageMaker\n Roles.

\n \n

To be able to pass this role to Amazon SageMaker, the caller of this action must\n have the iam:PassRole permission.

\n
" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can assume to perform actions on your behalf. For more information, see SageMaker AI\n Roles.

\n \n

To be able to pass this role to Amazon SageMaker AI, the caller of this action must\n have the iam:PassRole permission.

\n
" } }, "VpcConfig": { @@ -11788,7 +11951,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image\n version represents a container image stored in Amazon ECR. For more information, see\n Bring your own SageMaker image.

" + "smithy.api#documentation": "

Creates a custom SageMaker AI image. A SageMaker AI image is a set of image versions. Each image\n version represents a container image stored in Amazon ECR. For more information, see\n Bring your own SageMaker AI image.

" } }, "com.amazonaws.sagemaker#CreateImageRequest": { @@ -11818,7 +11981,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ARN of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.

", + "smithy.api#documentation": "

The ARN of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf.

", "smithy.api#required": {} } }, @@ -11867,7 +12030,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a version of the SageMaker image specified by ImageName. The version\n represents the Amazon ECR container image specified by BaseImage.

" + "smithy.api#documentation": "

Creates a version of the SageMaker AI image specified by ImageName. The version\n represents the Amazon ECR container image specified by BaseImage.

" } }, "com.amazonaws.sagemaker#CreateImageVersionRequest": { @@ -11913,7 +12076,7 @@ "JobType": { "target": "com.amazonaws.sagemaker#JobType", "traits": { - "smithy.api#documentation": "

Indicates SageMaker job type compatibility.

\n
    \n
  • \n

    \n TRAINING: The image version is compatible with SageMaker training jobs.

    \n
  • \n
  • \n

    \n INFERENCE: The image version is compatible with SageMaker inference jobs.

    \n
  • \n
  • \n

    \n NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels.

    \n
  • \n
" + "smithy.api#documentation": "

Indicates SageMaker AI job type compatibility.

\n
    \n
  • \n

    \n TRAINING: The image version is compatible with SageMaker AI training jobs.

    \n
  • \n
  • \n

    \n INFERENCE: The image version is compatible with SageMaker AI inference jobs.

    \n
  • \n
  • \n

    \n NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels.

    \n
  • \n
" } }, "MLFramework": { @@ -11979,7 +12142,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an inference component, which is a SageMaker hosting object that you can\n use to deploy a model to an endpoint. In the inference component settings, you specify the\n model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You\n can optimize resource utilization by tailoring how the required CPU cores, accelerators,\n and memory are allocated. You can deploy multiple inference components to an endpoint,\n where each inference component contains one model and the resource utilization needs for\n that individual model. After you deploy an inference component, you can directly invoke the\n associated model when you use the InvokeEndpoint API action.

" + "smithy.api#documentation": "

Creates an inference component, which is a SageMaker AI hosting object that you can\n use to deploy a model to an endpoint. In the inference component settings, you specify the\n model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You\n can optimize resource utilization by tailoring how the required CPU cores, accelerators,\n and memory are allocated. You can deploy multiple inference components to an endpoint,\n where each inference component contains one model and the resource utilization needs for\n that individual model. After you deploy an inference component, you can directly invoke the\n associated model when you use the InvokeEndpoint API action.

" } }, "com.amazonaws.sagemaker#CreateInferenceComponentInput": { @@ -12573,7 +12736,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can \n assume to perform tasks on your behalf.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can \n assume to perform tasks on your behalf.

", "smithy.api#required": {} } }, @@ -12841,7 +13004,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can \n assume to perform tasks on your behalf.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can \n assume to perform tasks on your behalf.

", "smithy.api#required": {} } }, @@ -13207,7 +13370,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a definition for a job that monitors model quality and drift. For information\n about model monitor, see Amazon SageMaker Model\n Monitor.

" + "smithy.api#documentation": "

Creates a definition for a job that monitors model quality and drift. For information\n about model monitor, see Amazon SageMaker AI Model\n Monitor.

" } }, "com.amazonaws.sagemaker#CreateModelQualityJobDefinitionRequest": { @@ -13267,7 +13430,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can \n assume to perform tasks on your behalf.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can \n assume to perform tasks on your behalf.

", "smithy.api#required": {} } }, @@ -13318,7 +13481,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to\n monitor the data captured for an Amazon SageMaker Endpoint.

" + "smithy.api#documentation": "

Creates a schedule that regularly starts Amazon SageMaker AI Processing Jobs to\n monitor the data captured for an Amazon SageMaker AI Endpoint.

" } }, "com.amazonaws.sagemaker#CreateMonitoringScheduleRequest": { @@ -13381,7 +13544,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an SageMaker notebook instance. A notebook instance is a machine\n learning (ML) compute instance running on a Jupyter notebook.

\n

In a CreateNotebookInstance request, specify the type of ML compute\n instance that you want to run. SageMaker launches the instance, installs common\n libraries that you can use to explore datasets for model training, and attaches an ML\n storage volume to the notebook instance.

\n

SageMaker also provides a set of example notebooks. Each notebook\n demonstrates how to use SageMaker with a specific algorithm or with a machine\n learning framework.

\n

After receiving the request, SageMaker does the following:

\n
    \n
  1. \n

    Creates a network interface in the SageMaker VPC.

    \n
  2. \n
  3. \n

    (Option) If you specified SubnetId, SageMaker creates\n a network interface in your own VPC, which is inferred from the subnet ID that\n you provide in the input. When creating this network interface, SageMaker attaches the security group that you specified in the request to the network\n interface that it creates in your VPC.

    \n
  4. \n
  5. \n

    Launches an EC2 instance of the type specified in the request in the\n SageMaker VPC. If you specified SubnetId of your VPC,\n SageMaker specifies both network interfaces when launching this\n instance. This enables inbound traffic from your own VPC to the notebook\n instance, assuming that the security groups allow it.

    \n
  6. \n
\n

After creating the notebook instance, SageMaker returns its Amazon Resource\n Name (ARN). You can't change the name of a notebook instance after you create\n it.

\n

After SageMaker creates the notebook instance, you can connect to the\n Jupyter server and work in Jupyter notebooks. For example, you can write code to explore\n a dataset that you can use for model training, train a model, host models by creating\n SageMaker endpoints, and validate hosted models.

\n

For more information, see How It Works.

" + "smithy.api#documentation": "

Creates an SageMaker AI notebook instance. A notebook instance is a machine\n learning (ML) compute instance running on a Jupyter notebook.

\n

In a CreateNotebookInstance request, specify the type of ML compute\n instance that you want to run. SageMaker AI launches the instance, installs common\n libraries that you can use to explore datasets for model training, and attaches an ML\n storage volume to the notebook instance.

\n

SageMaker AI also provides a set of example notebooks. Each notebook\n demonstrates how to use SageMaker AI with a specific algorithm or with a machine\n learning framework.

\n

After receiving the request, SageMaker AI does the following:

\n
    \n
  1. \n

    Creates a network interface in the SageMaker AI VPC.

    \n
  2. \n
  3. \n

    (Option) If you specified SubnetId, SageMaker AI creates\n a network interface in your own VPC, which is inferred from the subnet ID that\n you provide in the input. When creating this network interface, SageMaker AI attaches the security group that you specified in the request to the network\n interface that it creates in your VPC.

    \n
  4. \n
  5. \n

    Launches an EC2 instance of the type specified in the request in the\n SageMaker AI VPC. If you specified SubnetId of your VPC,\n SageMaker AI specifies both network interfaces when launching this\n instance. This enables inbound traffic from your own VPC to the notebook\n instance, assuming that the security groups allow it.

    \n
  6. \n
\n

After creating the notebook instance, SageMaker AI returns its Amazon Resource\n Name (ARN). You can't change the name of a notebook instance after you create\n it.

\n

After SageMaker AI creates the notebook instance, you can connect to the\n Jupyter server and work in Jupyter notebooks. For example, you can write code to explore\n a dataset that you can use for model training, train a model, host models by creating\n SageMaker AI endpoints, and validate hosted models.

\n

For more information, see How It Works.

" } }, "com.amazonaws.sagemaker#CreateNotebookInstanceInput": { @@ -13419,14 +13582,14 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

When you send any requests to Amazon Web Services resources from the notebook\n instance, SageMaker assumes this role to perform tasks on your behalf. You must\n grant this role necessary permissions so SageMaker can perform these tasks. The\n policy must allow the SageMaker service principal (sagemaker.amazonaws.com)\n permissions to assume this role. For more information, see SageMaker Roles.

\n \n

To be able to pass this role to SageMaker, the caller of this API must\n have the iam:PassRole permission.

\n
", + "smithy.api#documentation": "

When you send any requests to Amazon Web Services resources from the notebook\n instance, SageMaker AI assumes this role to perform tasks on your behalf. You must\n grant this role necessary permissions so SageMaker AI can perform these tasks. The\n policy must allow the SageMaker AI service principal (sagemaker.amazonaws.com)\n permissions to assume this role. For more information, see SageMaker AI Roles.

\n \n

To be able to pass this role to SageMaker AI, the caller of this API must\n have the iam:PassRole permission.

\n
", "smithy.api#required": {} } }, "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that\n SageMaker uses to encrypt data on the storage volume attached to your\n notebook instance. The KMS key you provide must be enabled. For information, see Enabling and\n Disabling Keys in the Amazon Web Services Key Management Service\n Developer Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that\n SageMaker AI uses to encrypt data on the storage volume attached to your\n notebook instance. The KMS key you provide must be enabled. For information, see Enabling and\n Disabling Keys in the Amazon Web Services Key Management Service\n Developer Guide.

" } }, "Tags": { @@ -13444,7 +13607,7 @@ "DirectInternetAccess": { "target": "com.amazonaws.sagemaker#DirectInternetAccess", "traits": { - "smithy.api#documentation": "

Sets whether SageMaker provides internet access to the notebook instance. If\n you set this to Disabled this notebook instance is able to access resources\n only in your VPC, and is not be able to connect to SageMaker training and\n endpoint services unless you configure a NAT Gateway in your VPC.

\n

For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value\n of this parameter to Disabled only if you set a value for the\n SubnetId parameter.

" + "smithy.api#documentation": "

Sets whether SageMaker AI provides internet access to the notebook instance. If\n you set this to Disabled this notebook instance is able to access resources\n only in your VPC, and is not be able to connect to SageMaker AI training and\n endpoint services unless you configure a NAT Gateway in your VPC.

\n

For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value\n of this parameter to Disabled only if you set a value for the\n SubnetId parameter.

" } }, "VolumeSizeInGB": { @@ -13462,13 +13625,13 @@ "DefaultCodeRepository": { "target": "com.amazonaws.sagemaker#CodeRepositoryNameOrUrl", "traits": { - "smithy.api#documentation": "

A Git repository to associate with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

A Git repository to associate with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } }, "AdditionalCodeRepositories": { "target": "com.amazonaws.sagemaker#AdditionalCodeRepositoryNamesOrUrls", "traits": { - "smithy.api#documentation": "

An array of up to three Git repositories to associate with the notebook instance.\n These can be either the names of Git repositories stored as resources in your account,\n or the URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

An array of up to three Git repositories to associate with the notebook instance.\n These can be either the names of Git repositories stored as resources in your account,\n or the URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } }, "RootAccess": { @@ -13602,7 +13765,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.

\n

During model optimization, Amazon SageMaker needs your permission to:

\n
    \n
  • \n

    Read input data from an S3 bucket

    \n
  • \n
  • \n

    Write model artifacts to an S3 bucket

    \n
  • \n
  • \n

    Write logs to Amazon CloudWatch Logs

    \n
  • \n
  • \n

    Publish metrics to Amazon CloudWatch

    \n
  • \n
\n

You grant permissions for all of these tasks to an IAM role. To pass this\n role to Amazon SageMaker, the caller of this API must have the\n iam:PassRole permission. For more information, see Amazon SageMaker Roles.\n

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker AI to perform tasks on your behalf.

\n

During model optimization, Amazon SageMaker AI needs your permission to:

\n
    \n
  • \n

    Read input data from an S3 bucket

    \n
  • \n
  • \n

    Write model artifacts to an S3 bucket

    \n
  • \n
  • \n

    Write logs to Amazon CloudWatch Logs

    \n
  • \n
  • \n

    Publish metrics to Amazon CloudWatch

    \n
  • \n
\n

You grant permissions for all of these tasks to an IAM role. To pass this\n role to Amazon SageMaker AI, the caller of this API must have the\n iam:PassRole permission. For more information, see Amazon SageMaker AI Roles.\n

", "smithy.api#required": {} } }, @@ -13976,7 +14139,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the\n user will be automatically signed in to the domain, and granted access to all of the Apps and\n files associated with the Domain's Amazon Elastic File System volume. This operation can only be\n called when the authentication mode equals IAM.

\n

The IAM role or user passed to this API defines the permissions to access\n the app. Once the presigned URL is created, no additional permission is required to access\n this URL. IAM authorization policies for this API are also enforced for every\n HTTP request and WebSocket frame that attempts to connect to the app.

\n

You can restrict access to this API and to the URL that it returns to a list of IP\n addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more\n information, see Connect to Amazon SageMaker\n Studio Through an Interface VPC Endpoint .

\n \n
    \n
  • \n

    The URL that you get from a call to CreatePresignedDomainUrl has a\n default timeout of 5 minutes. You can configure this value using\n ExpiresInSeconds. If you try to use the URL after the timeout limit\n expires, you are directed to the Amazon Web Services console sign-in page.

    \n
  • \n
  • \n

    The JupyterLab session default expiration time is 12 hours. You can configure this\n value using SessionExpirationDurationInSeconds.

    \n
  • \n
\n
" + "smithy.api#documentation": "

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the\n user will be automatically signed in to the domain, and granted access to all of the Apps and\n files associated with the Domain's Amazon Elastic File System volume. This operation can only be\n called when the authentication mode equals IAM.

\n

The IAM role or user passed to this API defines the permissions to access\n the app. Once the presigned URL is created, no additional permission is required to access\n this URL. IAM authorization policies for this API are also enforced for every\n HTTP request and WebSocket frame that attempts to connect to the app.

\n

You can restrict access to this API and to the URL that it returns to a list of IP\n addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more\n information, see Connect to Amazon SageMaker AI\n Studio Through an Interface VPC Endpoint .

\n \n
    \n
  • \n

    The URL that you get from a call to CreatePresignedDomainUrl has a\n default timeout of 5 minutes. You can configure this value using\n ExpiresInSeconds. If you try to use the URL after the timeout limit\n expires, you are directed to the Amazon Web Services console sign-in page.

    \n
  • \n
  • \n

    The JupyterLab session default expiration time is 12 hours. You can configure this\n value using SessionExpirationDurationInSeconds.

    \n
  • \n
\n
" } }, "com.amazonaws.sagemaker#CreatePresignedDomainUrlRequest": { @@ -14109,7 +14272,7 @@ "target": "com.amazonaws.sagemaker#CreatePresignedNotebookInstanceUrlOutput" }, "traits": { - "smithy.api#documentation": "

Returns a URL that you can use to connect to the Jupyter server from a notebook\n instance. In the SageMaker console, when you choose Open next to a\n notebook instance, SageMaker opens a new tab showing the Jupyter server home\n page from the notebook instance. The console uses this API to get the URL and show the\n page.

\n

The IAM role or user used to call this API defines the permissions to\n access the notebook instance. Once the presigned URL is created, no additional\n permission is required to access this URL. IAM authorization policies for\n this API are also enforced for every HTTP request and WebSocket frame that attempts to\n connect to the notebook instance.

\n

You can restrict access to this API and to the URL that it returns to a list of IP\n addresses that you specify. Use the NotIpAddress condition operator and the\n aws:SourceIP condition context key to specify the list of IP addresses\n that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

\n \n

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you\n try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page.

\n
" + "smithy.api#documentation": "

Returns a URL that you can use to connect to the Jupyter server from a notebook\n instance. In the SageMaker AI console, when you choose Open next to a\n notebook instance, SageMaker AI opens a new tab showing the Jupyter server home\n page from the notebook instance. The console uses this API to get the URL and show the\n page.

\n

The IAM role or user used to call this API defines the permissions to\n access the notebook instance. Once the presigned URL is created, no additional\n permission is required to access this URL. IAM authorization policies for\n this API are also enforced for every HTTP request and WebSocket frame that attempts to\n connect to the notebook instance.

\n

You can restrict access to this API and to the URL that it returns to a list of IP\n addresses that you specify. Use the NotIpAddress condition operator and the\n aws:SourceIP condition context key to specify the list of IP addresses\n that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

\n \n

The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you\n try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page.

\n
" } }, "com.amazonaws.sagemaker#CreatePresignedNotebookInstanceUrlInput": { @@ -14445,7 +14608,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new Amazon SageMaker Studio Lifecycle Configuration.

" + "smithy.api#documentation": "

Creates a new Amazon SageMaker AI Studio Lifecycle Configuration.

" } }, "com.amazonaws.sagemaker#CreateStudioLifecycleConfigRequest": { @@ -14455,7 +14618,7 @@ "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the Amazon SageMaker Studio Lifecycle Configuration to create.

", + "smithy.api#documentation": "

The name of the Amazon SageMaker AI Studio Lifecycle Configuration to create.

", "smithy.api#required": {} } }, @@ -14463,7 +14626,7 @@ "target": "com.amazonaws.sagemaker#StudioLifecycleConfigContent", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The content of your Amazon SageMaker Studio Lifecycle Configuration script. This\n content must be base64 encoded.

", + "smithy.api#documentation": "

The content of your Amazon SageMaker AI Studio Lifecycle Configuration script. This\n content must be base64 encoded.

", "smithy.api#required": {} } }, @@ -15380,7 +15543,7 @@ } }, "traits": { - "smithy.api#documentation": "

A file system, created by you, that you assign to a user profile or space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker\n Studio.

" + "smithy.api#documentation": "

A file system, created by you, that you assign to a user profile or space for an Amazon SageMaker AI Domain. Permitted users can access this file system in Amazon SageMaker AI\n Studio.

" } }, "com.amazonaws.sagemaker#CustomFileSystemConfig": { @@ -15400,7 +15563,7 @@ } }, "traits": { - "smithy.api#documentation": "

The settings for assigning a custom file system to a user profile or space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker\n Studio.

" + "smithy.api#documentation": "

The settings for assigning a custom file system to a user profile or space for an Amazon SageMaker AI Domain. Permitted users can access this file system in Amazon SageMaker AI\n Studio.

" } }, "com.amazonaws.sagemaker#CustomFileSystemConfigs": { @@ -15454,7 +15617,7 @@ } }, "traits": { - "smithy.api#documentation": "

A custom SageMaker image. For more information, see\n Bring your own SageMaker image.

" + "smithy.api#documentation": "

A custom SageMaker AI image. For more information, see\n Bring your own SageMaker AI image.

" } }, "com.amazonaws.sagemaker#CustomImageContainerArguments": { @@ -15612,7 +15775,7 @@ "target": "com.amazonaws.sagemaker#SamplingPercentage", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The percentage of requests SageMaker will capture. A lower value is recommended\n for Endpoints with high traffic.

", + "smithy.api#documentation": "

The percentage of requests SageMaker AI will capture. A lower value is recommended\n for Endpoints with high traffic.

", "smithy.api#required": {} } }, @@ -15627,7 +15790,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Key Management Service key that SageMaker\n uses to encrypt the captured data at rest using Amazon S3 server-side\n encryption.

\n

The KmsKeyId can be any of the following formats:

\n
    \n
  • \n

    Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Alias name: alias/ExampleAlias\n

    \n
  • \n
  • \n

    Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\n

    \n
  • \n
" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Key Management Service key that SageMaker AI\n uses to encrypt the captured data at rest using Amazon S3 server-side\n encryption.

\n

The KmsKeyId can be any of the following formats:

\n
    \n
  • \n

    Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Alias name: alias/ExampleAlias\n

    \n
  • \n
  • \n

    Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\n

    \n
  • \n
" } }, "CaptureOptions": { @@ -15641,12 +15804,12 @@ "CaptureContentTypeHeader": { "target": "com.amazonaws.sagemaker#CaptureContentTypeHeader", "traits": { - "smithy.api#documentation": "

Configuration specifying how to treat different headers. If no headers are specified\n SageMaker will by default base64 encode when capturing the data.

" + "smithy.api#documentation": "

Configuration specifying how to treat different headers. If no headers are specified\n SageMaker AI will by default base64 encode when capturing the data.

" } } }, "traits": { - "smithy.api#documentation": "

Configuration to control how SageMaker captures inference data.

" + "smithy.api#documentation": "

Configuration to control how SageMaker AI captures inference data.

" } }, "com.amazonaws.sagemaker#DataCaptureConfigSummary": { @@ -16185,7 +16348,7 @@ "CustomFileSystemConfigs": { "target": "com.amazonaws.sagemaker#CustomFileSystemConfigs", "traits": { - "smithy.api#documentation": "

The settings for assigning a custom file system to a domain. Permitted users can access\n this file system in Amazon SageMaker Studio.

" + "smithy.api#documentation": "

The settings for assigning a custom file system to a domain. Permitted users can access\n this file system in Amazon SageMaker AI Studio.

" } } }, @@ -16632,7 +16795,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified compilation job. This action deletes only the compilation job\n resource in Amazon SageMaker. It doesn't delete other resources that are related to\n that job, such as the model artifacts that the job creates, the compilation logs in\n CloudWatch, the compiled model, or the IAM role.

\n

You can delete a compilation job only if its current status is COMPLETED,\n FAILED, or STOPPED. If the job status is\n STARTING or INPROGRESS, stop the job, and then delete it\n after its status becomes STOPPED.

" + "smithy.api#documentation": "

Deletes the specified compilation job. This action deletes only the compilation job\n resource in Amazon SageMaker AI. It doesn't delete other resources that are related to\n that job, such as the model artifacts that the job creates, the compilation logs in\n CloudWatch, the compiled model, or the IAM role.

\n

You can delete a compilation job only if its current status is COMPLETED,\n FAILED, or STOPPED. If the job status is\n STARTING or INPROGRESS, stop the job, and then delete it\n after its status becomes STOPPED.

" } }, "com.amazonaws.sagemaker#DeleteCompilationJobRequest": { @@ -17322,7 +17485,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a SageMaker image and all versions of the image. The container images aren't\n deleted.

" + "smithy.api#documentation": "

Deletes a SageMaker AI image and all versions of the image. The container images aren't\n deleted.

" } }, "com.amazonaws.sagemaker#DeleteImageRequest": { @@ -17365,7 +17528,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a version of a SageMaker image. The container image the version represents isn't\n deleted.

" + "smithy.api#documentation": "

Deletes a version of a SageMaker AI image. The container image the version represents isn't\n deleted.

" } }, "com.amazonaws.sagemaker#DeleteImageVersionRequest": { @@ -17556,7 +17719,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an Amazon SageMaker model bias job definition.

" + "smithy.api#documentation": "

Deletes an Amazon SageMaker AI model bias job definition.

" } }, "com.amazonaws.sagemaker#DeleteModelBiasJobDefinitionRequest": { @@ -17625,7 +17788,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an Amazon SageMaker model explainability job definition.

" + "smithy.api#documentation": "

Deletes an Amazon SageMaker AI model explainability job definition.

" } }, "com.amazonaws.sagemaker#DeleteModelExplainabilityJobDefinitionRequest": { @@ -17829,7 +17992,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Deletes an SageMaker notebook instance. Before you can delete a notebook\n instance, you must call the StopNotebookInstance API.

\n \n

When you delete a notebook instance, you lose all of your data. SageMaker removes the ML compute instance, and deletes the ML storage volume and the\n network interface associated with the notebook instance.

\n
" + "smithy.api#documentation": "

Deletes an SageMaker AI notebook instance. Before you can delete a notebook\n instance, you must call the StopNotebookInstance API.

\n \n

When you delete a notebook instance, you lose all of your data. SageMaker AI removes the ML compute instance, and deletes the ML storage volume and the\n network interface associated with the notebook instance.

\n
" } }, "com.amazonaws.sagemaker#DeleteNotebookInstanceInput": { @@ -17839,7 +18002,7 @@ "target": "com.amazonaws.sagemaker#NotebookInstanceName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the SageMaker notebook instance to delete.

", + "smithy.api#documentation": "

The name of the SageMaker AI notebook instance to delete.

", "smithy.api#required": {} } } @@ -18119,7 +18282,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the Amazon SageMaker Studio Lifecycle Configuration. In order to delete the\n Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You\n must also remove the Lifecycle Configuration from UserSettings in all Domains and\n UserProfiles.

" + "smithy.api#documentation": "

Deletes the Amazon SageMaker AI Studio Lifecycle Configuration. In order to delete the\n Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You\n must also remove the Lifecycle Configuration from UserSettings in all Domains and\n UserProfiles.

" } }, "com.amazonaws.sagemaker#DeleteStudioLifecycleConfigRequest": { @@ -18129,7 +18292,7 @@ "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the Amazon SageMaker Studio Lifecycle Configuration to delete.

", + "smithy.api#documentation": "

The name of the Amazon SageMaker AI Studio Lifecycle Configuration to delete.

", "smithy.api#required": {} } } @@ -19065,13 +19228,13 @@ "LastUserActivityTimestamp": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

The timestamp of the last user's activity. LastUserActivityTimestamp is also\n updated when SageMaker performs health checks without user activity. As a result, this\n value is set to the same value as LastHealthCheckTimestamp.

" + "smithy.api#documentation": "

The timestamp of the last user's activity. LastUserActivityTimestamp is also\n updated when SageMaker AI performs health checks without user activity. As a result, this\n value is set to the same value as LastHealthCheckTimestamp.

" } }, "CreationTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

The creation time of the application.

\n \n

After an application has been shut down for 24 hours, SageMaker deletes all\n metadata for the application. To be considered an update and retain application metadata,\n applications must be restarted within 24 hours after the previous application has been shut\n down. After this time window, creation of an application is considered a new application\n rather than an update of the previous application.

\n
" + "smithy.api#documentation": "

The creation time of the application.

\n \n

After an application has been shut down for 24 hours, SageMaker AI deletes all\n metadata for the application. To be considered an update and retain application metadata,\n applications must be restarted within 24 hours after the previous application has been shut\n down. After this time window, creation of an application is considered a new application\n rather than an update of the previous application.

\n
" } }, "FailureReason": { @@ -19083,7 +19246,7 @@ "ResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker image\n created on the instance.

" + "smithy.api#documentation": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker AI image\n created on the instance.

" } }, "BuiltInLifecycleConfigArn": { @@ -19326,7 +19489,7 @@ "BestCandidate": { "target": "com.amazonaws.sagemaker#AutoMLCandidate", "traits": { - "smithy.api#documentation": "

The best model candidate selected by SageMaker Autopilot using both the best\n objective metric and lowest InferenceLatency for\n an experiment.

" + "smithy.api#documentation": "

The best model candidate selected by SageMaker AI Autopilot using both the best\n objective metric and lowest InferenceLatency for\n an experiment.

" } }, "AutoMLJobStatus": { @@ -20003,14 +20166,14 @@ "CompilationEndTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

The time when the model compilation job on a compilation job instance ended. For a\n successful or stopped job, this is when the job's model artifacts have finished\n uploading. For a failed job, this is when Amazon SageMaker detected that the job failed.

" + "smithy.api#documentation": "

The time when the model compilation job on a compilation job instance ended. For a\n successful or stopped job, this is when the job's model artifacts have finished\n uploading. For a failed job, this is when Amazon SageMaker AI detected that the job failed.

" } }, "StoppingCondition": { "target": "com.amazonaws.sagemaker#StoppingCondition", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Specifies a limit to how long a model compilation job can run. When the job reaches\n the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training\n costs.

", + "smithy.api#documentation": "

Specifies a limit to how long a model compilation job can run. When the job reaches\n the time limit, Amazon SageMaker AI ends the compilation job. Use this API to cap model training\n costs.

", "smithy.api#required": {} } }, @@ -20068,7 +20231,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker assumes to perform the model\n compilation job.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI assumes to perform the model\n compilation job.

", "smithy.api#required": {} } }, @@ -20457,7 +20620,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can \n assume to perform tasks on your behalf.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can \n assume to perform tasks on your behalf.

", "smithy.api#required": {} } }, @@ -20764,7 +20927,7 @@ "SingleSignOnApplicationArn": { "target": "com.amazonaws.sagemaker#SingleSignOnApplicationArn", "traits": { - "smithy.api#documentation": "

The ARN of the application managed by SageMaker in IAM Identity Center. This value\n is only returned for domains created after October 1, 2023.

" + "smithy.api#documentation": "

The ARN of the application managed by SageMaker AI in IAM Identity Center. This value\n is only returned for domains created after October 1, 2023.

" } }, "Status": { @@ -20818,7 +20981,7 @@ "AppNetworkAccessType": { "target": "com.amazonaws.sagemaker#AppNetworkAccessType", "traits": { - "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. The default value is\n PublicInternetOnly.

\n
    \n
  • \n

    \n PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access

    \n
  • \n
  • \n

    \n VpcOnly - All traffic is through the specified VPC and subnets

    \n
  • \n
" + "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic. The default value is\n PublicInternetOnly.

\n
    \n
  • \n

    \n PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access

    \n
  • \n
  • \n

    \n VpcOnly - All traffic is through the specified VPC and subnets

    \n
  • \n
" } }, "HomeEfsFileSystemKmsKeyId": { @@ -22480,7 +22643,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a SageMaker image.

", + "smithy.api#documentation": "

Describes a SageMaker AI image.

", "smithy.api#suppress": [ "WaitableTraitInvalidErrorType" ], @@ -22647,7 +22810,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "

The ARN of the IAM role that enables Amazon SageMaker to perform tasks on your behalf.

" + "smithy.api#documentation": "

The ARN of the IAM role that enables Amazon SageMaker AI to perform tasks on your behalf.

" } } }, @@ -22669,7 +22832,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a version of a SageMaker image.

", + "smithy.api#documentation": "

Describes a version of a SageMaker AI image.

", "smithy.api#suppress": [ "WaitableTraitInvalidErrorType" ], @@ -22829,7 +22992,7 @@ "JobType": { "target": "com.amazonaws.sagemaker#JobType", "traits": { - "smithy.api#documentation": "

Indicates SageMaker job type compatibility.

\n
    \n
  • \n

    \n TRAINING: The image version is compatible with SageMaker training jobs.

    \n
  • \n
  • \n

    \n INFERENCE: The image version is compatible with SageMaker inference jobs.

    \n
  • \n
  • \n

    \n NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels.

    \n
  • \n
" + "smithy.api#documentation": "

Indicates SageMaker AI job type compatibility.

\n
    \n
  • \n

    \n TRAINING: The image version is compatible with SageMaker AI training jobs.

    \n
  • \n
  • \n

    \n INFERENCE: The image version is compatible with SageMaker AI inference jobs.

    \n
  • \n
  • \n

    \n NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels.

    \n
  • \n
" } }, "MLFramework": { @@ -24632,7 +24795,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can \n assume to perform tasks on your behalf.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can \n assume to perform tasks on your behalf.

", "smithy.api#required": {} } }, @@ -24945,7 +25108,7 @@ "NotebookInstanceName": { "target": "com.amazonaws.sagemaker#NotebookInstanceName", "traits": { - "smithy.api#documentation": "

The name of the SageMaker notebook instance.

" + "smithy.api#documentation": "

The name of the SageMaker AI notebook instance.

" } }, "NotebookInstanceStatus": { @@ -24993,13 +25156,13 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Amazon Web Services KMS key ID SageMaker uses to encrypt data when\n storing it on the ML storage volume attached to the instance.

" + "smithy.api#documentation": "

The Amazon Web Services KMS key ID SageMaker AI uses to encrypt data when\n storing it on the ML storage volume attached to the instance.

" } }, "NetworkInterfaceId": { "target": "com.amazonaws.sagemaker#NetworkInterfaceId", "traits": { - "smithy.api#documentation": "

The network interface IDs that SageMaker created at the time of creating\n the instance.

" + "smithy.api#documentation": "

The network interface IDs that SageMaker AI created at the time of creating\n the instance.

" } }, "LastModifiedTime": { @@ -25023,7 +25186,7 @@ "DirectInternetAccess": { "target": "com.amazonaws.sagemaker#DirectInternetAccess", "traits": { - "smithy.api#documentation": "

Describes whether SageMaker provides internet access to the notebook instance.\n If this value is set to Disabled, the notebook instance does not\n have internet access, and cannot connect to SageMaker training and endpoint\n services.

\n

For more information, see Notebook Instances Are Internet-Enabled by Default.

" + "smithy.api#documentation": "

Describes whether SageMaker AI provides internet access to the notebook instance.\n If this value is set to Disabled, the notebook instance does not\n have internet access, and cannot connect to SageMaker AI training and endpoint\n services.

\n

For more information, see Notebook Instances Are Internet-Enabled by Default.

" } }, "VolumeSizeInGB": { @@ -25041,13 +25204,13 @@ "DefaultCodeRepository": { "target": "com.amazonaws.sagemaker#CodeRepositoryNameOrUrl", "traits": { - "smithy.api#documentation": "

The Git repository associated with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

The Git repository associated with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } }, "AdditionalCodeRepositories": { "target": "com.amazonaws.sagemaker#AdditionalCodeRepositoryNamesOrUrls", "traits": { - "smithy.api#documentation": "

An array of up to three Git repositories associated with the notebook instance. These\n can be either the names of Git repositories stored as resources in your account, or the\n URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

An array of up to three Git repositories associated with the notebook instance. These\n can be either the names of Git repositories stored as resources in your account, or the\n URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } }, "RootAccess": { @@ -26112,7 +26275,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the Amazon SageMaker Studio Lifecycle Configuration.

" + "smithy.api#documentation": "

Describes the Amazon SageMaker AI Studio Lifecycle Configuration.

" } }, "com.amazonaws.sagemaker#DescribeStudioLifecycleConfigRequest": { @@ -26122,7 +26285,7 @@ "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the Amazon SageMaker Studio Lifecycle Configuration to describe.

", + "smithy.api#documentation": "

The name of the Amazon SageMaker AI Studio Lifecycle Configuration to describe.

", "smithy.api#required": {} } } @@ -26143,25 +26306,25 @@ "StudioLifecycleConfigName": { "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", "traits": { - "smithy.api#documentation": "

The name of the Amazon SageMaker Studio Lifecycle Configuration that is\n described.

" + "smithy.api#documentation": "

The name of the Amazon SageMaker AI Studio Lifecycle Configuration that is\n described.

" } }, "CreationTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

The creation time of the Amazon SageMaker Studio Lifecycle Configuration.

" + "smithy.api#documentation": "

The creation time of the Amazon SageMaker AI Studio Lifecycle Configuration.

" } }, "LastModifiedTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle\n Configurations are immutable.

" + "smithy.api#documentation": "

This value is equivalent to CreationTime because Amazon SageMaker AI Studio Lifecycle\n Configurations are immutable.

" } }, "StudioLifecycleConfigContent": { "target": "com.amazonaws.sagemaker#StudioLifecycleConfigContent", "traits": { - "smithy.api#documentation": "

The content of your Amazon SageMaker Studio Lifecycle Configuration script.

" + "smithy.api#documentation": "

The content of your Amazon SageMaker AI Studio Lifecycle Configuration script.

" } }, "StudioLifecycleConfigAppType": { @@ -28239,7 +28402,7 @@ "ExecutionRoleIdentityConfig": { "target": "com.amazonaws.sagemaker#ExecutionRoleIdentityConfig", "traits": { - "smithy.api#documentation": "

The configuration for attaching a SageMaker user profile name to the execution\n role as a sts:SourceIdentity key.

" + "smithy.api#documentation": "

The configuration for attaching a SageMaker AI user profile name to the execution\n role as a sts:SourceIdentity key.

" } }, "DockerSettings": { @@ -28271,7 +28434,7 @@ "ExecutionRoleIdentityConfig": { "target": "com.amazonaws.sagemaker#ExecutionRoleIdentityConfig", "traits": { - "smithy.api#documentation": "

The configuration for attaching a SageMaker user profile name to the execution\n role as a sts:SourceIdentity key. This configuration can only be modified if there are no\n apps in the InService or Pending state.

" + "smithy.api#documentation": "

The configuration for attaching a SageMaker AI user profile name to the execution\n role as a sts:SourceIdentity key. This configuration can only be modified if there are no\n apps in the InService or Pending state.

" } }, "SecurityGroupIds": { @@ -28519,7 +28682,7 @@ } }, "traits": { - "smithy.api#documentation": "

A file system, created by you in Amazon EFS, that you assign to a user profile or\n space for an Amazon SageMaker Domain. Permitted users can access this file system in\n Amazon SageMaker Studio.

" + "smithy.api#documentation": "

A file system, created by you in Amazon EFS, that you assign to a user profile or\n space for an Amazon SageMaker AI Domain. Permitted users can access this file system in\n Amazon SageMaker AI Studio.

" } }, "com.amazonaws.sagemaker#EFSFileSystemConfig": { @@ -28536,12 +28699,12 @@ "FileSystemPath": { "target": "com.amazonaws.sagemaker#FileSystemPath", "traits": { - "smithy.api#documentation": "

The path to the file system directory that is accessible in Amazon SageMaker Studio.\n Permitted users can access only this directory and below.

" + "smithy.api#documentation": "

The path to the file system directory that is accessible in Amazon SageMaker AI Studio.\n Permitted users can access only this directory and below.

" } } }, "traits": { - "smithy.api#documentation": "

The settings for assigning a custom Amazon EFS file system to a user profile or\n space for an Amazon SageMaker Domain.

" + "smithy.api#documentation": "

The settings for assigning a custom Amazon EFS file system to a user profile or\n space for an Amazon SageMaker AI Domain.

" } }, "com.amazonaws.sagemaker#EMRStepMetadata": { @@ -31113,7 +31276,7 @@ } }, "traits": { - "smithy.api#documentation": "

The Amazon Elastic File System storage configuration for a SageMaker image.

" + "smithy.api#documentation": "

The Amazon Elastic File System storage configuration for a SageMaker AI image.

" } }, "com.amazonaws.sagemaker#FileSystemDataSource": { @@ -34425,7 +34588,7 @@ } }, "traits": { - "smithy.api#documentation": "

A SageMaker image. A SageMaker image represents a set of container images that are derived from\n a common base container image. Each of these container images is represented by a SageMaker\n ImageVersion.

" + "smithy.api#documentation": "

A SageMaker AI image. A SageMaker AI image represents a set of container images that are derived from\n a common base container image. Each of these container images is represented by a SageMaker AI\n ImageVersion.

" } }, "com.amazonaws.sagemaker#ImageArn": { @@ -34721,7 +34884,7 @@ } }, "traits": { - "smithy.api#documentation": "

A version of a SageMaker Image. A version represents an existing container\n image.

" + "smithy.api#documentation": "

A version of a SageMaker AI Image. A version represents an existing container\n image.

" } }, "com.amazonaws.sagemaker#ImageVersionAlias": { @@ -35173,7 +35336,7 @@ "ModelName": { "target": "com.amazonaws.sagemaker#ModelName", "traits": { - "smithy.api#documentation": "

The name of an existing SageMaker model object in your account that you want to\n deploy with the inference component.

" + "smithy.api#documentation": "

The name of an existing SageMaker AI model object in your account that you want to\n deploy with the inference component.

" } }, "Container": { @@ -35211,7 +35374,7 @@ "ModelName": { "target": "com.amazonaws.sagemaker#ModelName", "traits": { - "smithy.api#documentation": "

The name of the SageMaker model object that is deployed with the inference\n component.

" + "smithy.api#documentation": "

The name of the SageMaker AI model object that is deployed with the inference\n component.

" } }, "Container": { @@ -37507,7 +37670,7 @@ } }, "traits": { - "smithy.api#documentation": "

The configuration for the file system and kernels in a SageMaker image running as a JupyterLab app. The FileSystemConfig object is not supported.

" + "smithy.api#documentation": "

The configuration for the file system and kernels in a SageMaker AI image running as a JupyterLab app. The FileSystemConfig object is not supported.

" } }, "com.amazonaws.sagemaker#JupyterLabAppSettings": { @@ -37563,7 +37726,7 @@ "DefaultResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the\n LifecycleConfigArns parameter, then this parameter is also required.

" + "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the JupyterServer app. If you use the\n LifecycleConfigArns parameter, then this parameter is also required.

" } }, "LifecycleConfigArns": { @@ -37575,7 +37738,7 @@ "CodeRepositories": { "target": "com.amazonaws.sagemaker#CodeRepositories", "traits": { - "smithy.api#documentation": "

A list of Git repositories that SageMaker automatically displays to users for\n cloning in the JupyterServer application.

" + "smithy.api#documentation": "

A list of Git repositories that SageMaker AI automatically displays to users for\n cloning in the JupyterServer application.

" } } }, @@ -37622,13 +37785,13 @@ "DefaultResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.

\n \n

The Amazon SageMaker Studio UI does not use the default instance type value set\n here. The default instance type set here is used when Apps are created using the CLI or CloudFormation and the instance type parameter value is not\n passed.

\n
" + "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker AI image used by the KernelGateway app.

\n \n

The Amazon SageMaker AI Studio UI does not use the default instance type value set\n here. The default instance type set here is used when Apps are created using the CLI or CloudFormation and the instance type parameter value is not\n passed.

\n
" } }, "CustomImages": { "target": "com.amazonaws.sagemaker#CustomImages", "traits": { - "smithy.api#documentation": "

A list of custom SageMaker images that are configured to run as a KernelGateway\n app.

" + "smithy.api#documentation": "

A list of custom SageMaker AI images that are configured to run as a KernelGateway\n app.

" } }, "LifecycleConfigArns": { @@ -37656,12 +37819,12 @@ "FileSystemConfig": { "target": "com.amazonaws.sagemaker#FileSystemConfig", "traits": { - "smithy.api#documentation": "

The Amazon Elastic File System storage configuration for a SageMaker image.

" + "smithy.api#documentation": "

The Amazon Elastic File System storage configuration for a SageMaker AI image.

" } } }, "traits": { - "smithy.api#documentation": "

The configuration for the file system and kernels in a SageMaker image running as a\n KernelGateway app.

" + "smithy.api#documentation": "

The configuration for the file system and kernels in a SageMaker AI image running as a\n KernelGateway app.

" } }, "com.amazonaws.sagemaker#KernelName": { @@ -38715,7 +38878,7 @@ "SageMakerImageVersionAliases": { "target": "com.amazonaws.sagemaker#SageMakerImageVersionAliases", "traits": { - "smithy.api#documentation": "

A list of SageMaker image version aliases.

" + "smithy.api#documentation": "

A list of SageMaker AI image version aliases.

" } }, "NextToken": { @@ -39831,7 +39994,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the response is truncated, Amazon SageMaker returns this NextToken. To retrieve\n the next set of model compilation jobs, use this token in the next request.

" + "smithy.api#documentation": "

If the response is truncated, Amazon SageMaker AI returns this NextToken. To retrieve\n the next set of model compilation jobs, use this token in the next request.

" } } }, @@ -43610,7 +43773,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the response is truncated, Amazon SageMaker returns this token. To retrieve the\n next set of model quality monitoring job definitions, use it in the next request.

" + "smithy.api#documentation": "

If the response is truncated, Amazon SageMaker AI returns this token. To retrieve the\n next set of model quality monitoring job definitions, use it in the next request.

" } } }, @@ -44234,7 +44397,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the response is truncated, SageMaker returns this token. To get the next\n set of lifecycle configurations, use it in the next request.

" + "smithy.api#documentation": "

If the response is truncated, SageMaker AI returns this token. To get the next\n set of lifecycle configurations, use it in the next request.

" } }, "NotebookInstanceLifecycleConfigs": { @@ -44257,7 +44420,7 @@ "target": "com.amazonaws.sagemaker#ListNotebookInstancesOutput" }, "traits": { - "smithy.api#documentation": "

Returns a list of the SageMaker notebook instances in the requester's\n account in an Amazon Web Services Region.

", + "smithy.api#documentation": "

Returns a list of the SageMaker AI notebook instances in the requester's\n account in an Amazon Web Services Region.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -44358,7 +44521,7 @@ "NextToken": { "target": "com.amazonaws.sagemaker#NextToken", "traits": { - "smithy.api#documentation": "

If the response to the previous ListNotebookInstances request was\n truncated, SageMaker returns this token. To retrieve the next set of notebook\n instances, use the token in the next request.

" + "smithy.api#documentation": "

If the response to the previous ListNotebookInstances request was\n truncated, SageMaker AI returns this token. To retrieve the next set of notebook\n instances, use the token in the next request.

" } }, "NotebookInstances": { @@ -45359,7 +45522,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the Amazon SageMaker Studio Lifecycle Configurations in your Amazon Web Services\n Account.

", + "smithy.api#documentation": "

Lists the Amazon SageMaker AI Studio Lifecycle Configurations in your Amazon Web Services\n Account.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -47107,12 +47270,6 @@ "smithy.api#enumValue": "PerformanceEvaluation" } }, - "HYPER_POD_CLUSTERS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "HyperPodClusters" - } - }, "LAKERA_GUARD": { "target": "smithy.api#Unit", "traits": { @@ -47136,6 +47293,12 @@ "traits": { "smithy.api#enumValue": "Fiddler" } + }, + "HYPER_POD_CLUSTERS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HyperPodClusters" + } } } }, @@ -50198,7 +50361,7 @@ "VolumeKmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Key Management Service (KMS) key that Amazon SageMaker uses to\n encrypt data on the storage volume attached to the ML compute instance(s) that run the\n model monitoring job.

" + "smithy.api#documentation": "

The Key Management Service (KMS) key that Amazon SageMaker AI uses to\n encrypt data on the storage volume attached to the ML compute instance(s) that run the\n model monitoring job.

" } } }, @@ -50471,7 +50634,7 @@ "target": "com.amazonaws.sagemaker#MonitoringInputs", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker Endpoint.

", + "smithy.api#documentation": "

The array of inputs for the monitoring job. Currently we support monitoring an Amazon SageMaker AI Endpoint.

", "smithy.api#required": {} } }, @@ -50521,7 +50684,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can \n assume to perform tasks on your behalf.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker AI can \n assume to perform tasks on your behalf.

", "smithy.api#required": {} } } @@ -50689,7 +50852,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Key Management Service (KMS) key that Amazon SageMaker uses to\n encrypt the model artifacts at rest using Amazon S3 server-side encryption.

" + "smithy.api#documentation": "

The Key Management Service (KMS) key that Amazon SageMaker AI uses to\n encrypt the model artifacts at rest using Amazon S3 server-side encryption.

" } } }, @@ -50762,7 +50925,7 @@ "target": "com.amazonaws.sagemaker#MonitoringS3Uri", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A URI that identifies the Amazon S3 storage location where Amazon SageMaker\n saves the results of a monitoring job.

", + "smithy.api#documentation": "

A URI that identifies the Amazon S3 storage location where Amazon SageMaker AI\n saves the results of a monitoring job.

", "smithy.api#required": {} } }, @@ -50770,7 +50933,7 @@ "target": "com.amazonaws.sagemaker#ProcessingLocalPath", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The local path to the Amazon S3 storage location where Amazon SageMaker\n saves the results of a monitoring job. LocalPath is an absolute path for the output\n data.

", + "smithy.api#documentation": "

The local path to the Amazon S3 storage location where Amazon SageMaker AI\n saves the results of a monitoring job. LocalPath is an absolute path for the output\n data.

", "smithy.api#required": {} } }, @@ -51140,7 +51303,7 @@ } }, "traits": { - "smithy.api#documentation": "

The VpcConfig configuration object that specifies the VPC that you want the\n compilation jobs to connect to. For more information on controlling access to your Amazon S3\n buckets used for compilation job, see Give Amazon SageMaker Compilation Jobs Access to\n Resources in Your Amazon VPC.

" + "smithy.api#documentation": "

The VpcConfig configuration object that specifies the VPC that you want the\n compilation jobs to connect to. For more information on controlling access to your Amazon S3\n buckets used for compilation job, see Give Amazon SageMaker AI Compilation Jobs Access to\n Resources in Your Amazon VPC.

" } }, "com.amazonaws.sagemaker#NeoVpcSecurityGroupId": { @@ -51647,18 +51810,18 @@ "DefaultCodeRepository": { "target": "com.amazonaws.sagemaker#CodeRepositoryNameOrUrl", "traits": { - "smithy.api#documentation": "

The Git repository associated with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

The Git repository associated with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } }, "AdditionalCodeRepositories": { "target": "com.amazonaws.sagemaker#AdditionalCodeRepositoryNamesOrUrls", "traits": { - "smithy.api#documentation": "

An array of up to three Git repositories associated with the notebook instance. These\n can be either the names of Git repositories stored as resources in your account, or the\n URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

An array of up to three Git repositories associated with the notebook instance. These\n can be either the names of Git repositories stored as resources in your account, or the\n URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } } }, "traits": { - "smithy.api#documentation": "

Provides summary information for an SageMaker notebook instance.

" + "smithy.api#documentation": "

Provides summary information for a SageMaker AI notebook instance.

" } }, "com.amazonaws.sagemaker#NotebookInstanceSummaryList": { @@ -52771,7 +52934,7 @@ "target": "com.amazonaws.sagemaker#S3Uri", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For\n example, s3://bucket-name/key-name-prefix.

", + "smithy.api#documentation": "

Identifies the S3 bucket where you want Amazon SageMaker AI to store the model artifacts. For\n example, s3://bucket-name/key-name-prefix.

", "smithy.api#required": {} } }, @@ -52796,7 +52959,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker\n uses to encrypt your output models with Amazon S3 server-side encryption after compilation\n job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your\n role's account. For more information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer\n Guide.\n

\n

The KmsKeyId can be any of the following formats:

\n
    \n
  • \n

    Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Alias name: alias/ExampleAlias\n

    \n
  • \n
  • \n

    Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\n

    \n
  • \n
" + "smithy.api#documentation": "

The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker AI\n uses to encrypt your output models with Amazon S3 server-side encryption after compilation\n job. If you don't provide a KMS key ID, Amazon SageMaker AI uses the default KMS key for Amazon S3 for your\n role's account. For more information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer\n Guide.\n

\n

The KmsKeyId can be any of the following formats:

\n
    \n
  • \n

    Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Alias name: alias/ExampleAlias\n

    \n
  • \n
  • \n

    Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\n

    \n
  • \n
" } } }, @@ -58099,7 +58262,7 @@ "CustomImages": { "target": "com.amazonaws.sagemaker#CustomImages", "traits": { - "smithy.api#documentation": "

A list of custom SageMaker images that are configured to run as a RSession\n app.

" + "smithy.api#documentation": "

A list of custom SageMaker AI images that are configured to run as a RSession\n app.

" } } }, @@ -60006,7 +60169,7 @@ "SageMakerImageArn": { "target": "com.amazonaws.sagemaker#ImageArn", "traits": { - "smithy.api#documentation": "

The ARN of the SageMaker image that the image version belongs to.

" + "smithy.api#documentation": "

The ARN of the SageMaker AI image that the image version belongs to.

" } }, "SageMakerImageVersionArn": { @@ -60035,7 +60198,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that\n the version runs on.

" + "smithy.api#documentation": "

Specifies the ARN's of a SageMaker AI image and SageMaker AI image version, and the instance type that\n the version runs on.

" } }, "com.amazonaws.sagemaker#ResourceType": { @@ -62919,7 +63082,7 @@ "target": "com.amazonaws.sagemaker#ScheduleExpression", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A cron expression that describes details about the monitoring schedule.

\n

The supported cron expressions are:

\n
    \n
  • \n

    If you want to set the job to start every hour, use the following:

    \n

    \n Hourly: cron(0 * ? * * *)\n

    \n
  • \n
  • \n

    If you want to start the job daily:

    \n

    \n cron(0 [00-23] ? * * *)\n

    \n
  • \n
  • \n

    If you want to run the job one time, immediately, use the following\n keyword:

    \n

    \n NOW\n

    \n
  • \n
\n

For example, the following are valid cron expressions:

\n
    \n
  • \n

    Daily at noon UTC: cron(0 12 ? * * *)\n

    \n
  • \n
  • \n

    Daily at midnight UTC: cron(0 0 ? * * *)\n

    \n
  • \n
\n

To support running every 6, 12 hours, the following are also supported:

\n

\n cron(0 [00-23]/[01-24] ? * * *)\n

\n

For example, the following are valid cron expressions:

\n
    \n
  • \n

    Every 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *)\n

    \n
  • \n
  • \n

    Every two hours starting at midnight: cron(0 0/2 ? * * *)\n

    \n
  • \n
\n \n
    \n
  • \n

    Even though the cron expression is set to start at 5PM UTC, note that there\n could be a delay of 0-20 minutes from the actual requested time to run the\n execution.

    \n
  • \n
  • \n

    We recommend that if you would like a daily schedule, you do not provide this\n parameter. Amazon SageMaker will pick a time for running every day.

    \n
  • \n
\n
\n

You can also specify the keyword NOW to run the monitoring job immediately,\n one time, without recurring.

", + "smithy.api#documentation": "

A cron expression that describes details about the monitoring schedule.

\n

The supported cron expressions are:

\n
    \n
  • \n

    If you want to set the job to start every hour, use the following:

    \n

    \n Hourly: cron(0 * ? * * *)\n

    \n
  • \n
  • \n

    If you want to start the job daily:

    \n

    \n cron(0 [00-23] ? * * *)\n

    \n
  • \n
  • \n

    If you want to run the job one time, immediately, use the following\n keyword:

    \n

    \n NOW\n

    \n
  • \n
\n

For example, the following are valid cron expressions:

\n
    \n
  • \n

    Daily at noon UTC: cron(0 12 ? * * *)\n

    \n
  • \n
  • \n

    Daily at midnight UTC: cron(0 0 ? * * *)\n

    \n
  • \n
\n

To support running every 6, 12 hours, the following are also supported:

\n

\n cron(0 [00-23]/[01-24] ? * * *)\n

\n

For example, the following are valid cron expressions:

\n
    \n
  • \n

    Every 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *)\n

    \n
  • \n
  • \n

    Every two hours starting at midnight: cron(0 0/2 ? * * *)\n

    \n
  • \n
\n \n
    \n
  • \n

    Even though the cron expression is set to start at 5PM UTC, note that there\n could be a delay of 0-20 minutes from the actual requested time to run the\n execution.

    \n
  • \n
  • \n

    We recommend that if you would like a daily schedule, you do not provide this\n parameter. Amazon SageMaker AI will pick a time for running every day.

    \n
  • \n
\n
\n

You can also specify the keyword NOW to run the monitoring job immediately,\n one time, without recurring.

", "smithy.api#required": {} } }, @@ -64005,7 +64168,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies options for sharing Amazon SageMaker Studio notebooks. These settings are\n specified as part of DefaultUserSettings when the CreateDomain API\n is called, and as part of UserSettings when the CreateUserProfile\n API is called. When SharingSettings is not specified, notebook sharing isn't\n allowed.

" + "smithy.api#documentation": "

Specifies options for sharing Amazon SageMaker AI Studio notebooks. These settings are\n specified as part of DefaultUserSettings when the CreateDomain API\n is called, and as part of UserSettings when the CreateUserProfile\n API is called. When SharingSettings is not specified, notebook sharing isn't\n allowed.

" } }, "com.amazonaws.sagemaker#SharingType": { @@ -64680,7 +64843,7 @@ "AppType": { "target": "com.amazonaws.sagemaker#AppType", "traits": { - "smithy.api#documentation": "

The type of app created within the space.

" + "smithy.api#documentation": "

The type of app created within the space.

\n

If using the \n UpdateSpace API, you can't change the app type of your\n space by specifying a different value for this field.

" } }, "SpaceStorageSettings": { @@ -64692,7 +64855,7 @@ "CustomFileSystems": { "target": "com.amazonaws.sagemaker#CustomFileSystems", "traits": { - "smithy.api#documentation": "

A file system, created by you, that you assign to a space for an Amazon SageMaker\n Domain. Permitted users can access this file system in Amazon SageMaker Studio.

" + "smithy.api#documentation": "

A file system, created by you, that you assign to a space for an Amazon SageMaker AI\n Domain. Permitted users can access this file system in Amazon SageMaker AI Studio.

" } } }, @@ -65139,7 +65302,7 @@ } ], "traits": { - "smithy.api#documentation": "

Launches an ML compute instance with the latest version of the libraries and\n attaches your ML storage volume. After configuring the notebook instance, SageMaker sets the notebook instance status to InService. A notebook\n instance's status must be InService before you can connect to your Jupyter\n notebook.

" + "smithy.api#documentation": "

Launches an ML compute instance with the latest version of the libraries and\n attaches your ML storage volume. After configuring the notebook instance, SageMaker AI sets the notebook instance status to InService. A notebook\n instance's status must be InService before you can connect to your Jupyter\n notebook.

" } }, "com.amazonaws.sagemaker#StartNotebookInstanceInput": { @@ -65416,7 +65579,7 @@ } ], "traits": { - "smithy.api#documentation": "

Stops a model compilation job.

\n

To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the\n job down. If the job hasn't stopped, it sends the SIGKILL signal.

\n

When it receives a StopCompilationJob request, Amazon SageMaker changes the\n CompilationJobStatus of the job to Stopping. After Amazon\n SageMaker stops the job, it sets the CompilationJobStatus to\n Stopped.

" + "smithy.api#documentation": "

Stops a model compilation job.

\n

To stop a job, Amazon SageMaker AI sends the algorithm the SIGTERM signal. This gracefully shuts the\n job down. If the job hasn't stopped, it sends the SIGKILL signal.

\n

When it receives a StopCompilationJob request, Amazon SageMaker AI changes the\n CompilationJobStatus of the job to Stopping. After Amazon\n SageMaker AI stops the job, it sets the CompilationJobStatus to\n Stopped.

" } }, "com.amazonaws.sagemaker#StopCompilationJobRequest": { @@ -65768,7 +65931,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Terminates the ML compute instance. Before terminating the instance, SageMaker disconnects the ML storage volume from it. SageMaker preserves the\n ML storage volume. SageMaker stops charging you for the ML compute instance when\n you call StopNotebookInstance.

\n

To access data on the ML storage volume for a notebook instance that has been\n terminated, call the StartNotebookInstance API.\n StartNotebookInstance launches another ML compute instance, configures\n it, and attaches the preserved ML storage volume so you can continue your work.\n

" + "smithy.api#documentation": "

Terminates the ML compute instance. Before terminating the instance, SageMaker AI disconnects the ML storage volume from it. SageMaker AI preserves the\n ML storage volume. SageMaker AI stops charging you for the ML compute instance when\n you call StopNotebookInstance.

\n

To access data on the ML storage volume for a notebook instance that has been\n terminated, call the StartNotebookInstance API.\n StartNotebookInstance launches another ML compute instance, configures\n it, and attaches the preserved ML storage volume so you can continue your work.\n

" } }, "com.amazonaws.sagemaker#StopNotebookInstanceInput": { @@ -66177,19 +66340,19 @@ "StudioLifecycleConfigName": { "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", "traits": { - "smithy.api#documentation": "

The name of the Amazon SageMaker Studio Lifecycle Configuration.

" + "smithy.api#documentation": "

The name of the Amazon SageMaker AI Studio Lifecycle Configuration.

" } }, "CreationTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

The creation time of the Amazon SageMaker Studio Lifecycle Configuration.

" + "smithy.api#documentation": "

The creation time of the Amazon SageMaker AI Studio Lifecycle Configuration.

" } }, "LastModifiedTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

This value is equivalent to CreationTime because Amazon SageMaker Studio Lifecycle\n Configurations are immutable.

" + "smithy.api#documentation": "

This value is equivalent to CreationTime because Amazon SageMaker AI Studio Lifecycle\n Configurations are immutable.

" } }, "StudioLifecycleConfigAppType": { @@ -66200,7 +66363,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details of the Amazon SageMaker Studio Lifecycle Configuration.

" + "smithy.api#documentation": "

Details of the Amazon SageMaker AI Studio Lifecycle Configuration.

" } }, "com.amazonaws.sagemaker#StudioLifecycleConfigName": { @@ -67061,7 +67224,7 @@ "DefaultResourceSpec": { "target": "com.amazonaws.sagemaker#ResourceSpec", "traits": { - "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the SageMaker\n image created on the instance.

" + "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the SageMaker AI\n image created on the instance.

" } } }, @@ -70282,6 +70445,18 @@ "smithy.api#enumValue": "ml.g5.48xlarge" } }, + "ML_TRN1_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.trn1.2xlarge" + } + }, + "ML_TRN1_32XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.trn1.32xlarge" + } + }, "ML_INF2_XLARGE": { "target": "smithy.api#Unit", "traits": { @@ -72434,7 +72609,7 @@ "AppNetworkAccessType": { "target": "com.amazonaws.sagemaker#AppNetworkAccessType", "traits": { - "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic.

\n
    \n
  • \n

    \n PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access.

    \n
  • \n
  • \n

    \n VpcOnly - All Studio traffic is through the specified VPC and\n subnets.

    \n
  • \n
\n

This configuration can only be modified if there are no apps in the\n InService, Pending, or Deleting state. The\n configuration cannot be updated if\n DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already\n set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is\n provided as part of the same request.

" + "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic.

\n
    \n
  • \n

    \n PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker AI, which allows direct internet access.

    \n
  • \n
  • \n

    \n VpcOnly - All Studio traffic is through the specified VPC and\n subnets.

    \n
  • \n
\n

This configuration can only be modified if there are no apps in the\n InService, Pending, or Deleting state. The\n configuration cannot be updated if\n DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already\n set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is\n provided as part of the same request.

" } }, "TagPropagation": { @@ -72872,7 +73047,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the properties of a SageMaker image. To change the image's tags, use the\n AddTags and DeleteTags APIs.

" + "smithy.api#documentation": "

Updates the properties of a SageMaker AI image. To change the image's tags, use the\n AddTags and DeleteTags APIs.

" } }, "com.amazonaws.sagemaker#UpdateImageRequest": { @@ -72907,7 +73082,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "

The new ARN for the IAM role that enables Amazon SageMaker to perform tasks on your behalf.

" + "smithy.api#documentation": "

The new ARN for the IAM role that enables Amazon SageMaker AI to perform tasks on your behalf.

" } } }, @@ -72946,7 +73121,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the properties of a SageMaker image version.

" + "smithy.api#documentation": "

Updates the properties of a SageMaker AI image version.

" } }, "com.amazonaws.sagemaker#UpdateImageVersionRequest": { @@ -72993,7 +73168,7 @@ "JobType": { "target": "com.amazonaws.sagemaker#JobType", "traits": { - "smithy.api#documentation": "

Indicates SageMaker job type compatibility.

\n
    \n
  • \n

    \n TRAINING: The image version is compatible with SageMaker training jobs.

    \n
  • \n
  • \n

    \n INFERENCE: The image version is compatible with SageMaker inference jobs.

    \n
  • \n
  • \n

    \n NOTEBOOK_KERNEL: The image version is compatible with SageMaker notebook kernels.

    \n
  • \n
" + "smithy.api#documentation": "

Indicates SageMaker AI job type compatibility.

\n
    \n
  • \n

    \n TRAINING: The image version is compatible with SageMaker AI training jobs.

    \n
  • \n
  • \n

    \n INFERENCE: The image version is compatible with SageMaker AI inference jobs.

    \n
  • \n
  • \n

    \n NOTEBOOK_KERNEL: The image version is compatible with SageMaker AI notebook kernels.

    \n
  • \n
" } }, "MLFramework": { @@ -73677,7 +73852,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to\n access the notebook instance. For more information, see SageMaker Roles.

\n \n

To be able to pass this role to SageMaker, the caller of this API must\n have the iam:PassRole permission.

\n
" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role that SageMaker AI can assume to\n access the notebook instance. For more information, see SageMaker AI Roles.

\n \n

To be able to pass this role to SageMaker AI, the caller of this API must\n have the iam:PassRole permission.

\n
" } }, "LifecycleConfigName": { @@ -73695,19 +73870,19 @@ "VolumeSizeInGB": { "target": "com.amazonaws.sagemaker#NotebookInstanceVolumeSizeInGB", "traits": { - "smithy.api#documentation": "

The size, in GB, of the ML storage volume to attach to the notebook instance. The\n default value is 5 GB. ML storage volumes are encrypted, so SageMaker can't\n determine the amount of available free space on the volume. Because of this, you can\n increase the volume size when you update a notebook instance, but you can't decrease the\n volume size. If you want to decrease the size of the ML storage volume in use, create a\n new notebook instance with the desired size.

" + "smithy.api#documentation": "

The size, in GB, of the ML storage volume to attach to the notebook instance. The\n default value is 5 GB. ML storage volumes are encrypted, so SageMaker AI can't\n determine the amount of available free space on the volume. Because of this, you can\n increase the volume size when you update a notebook instance, but you can't decrease the\n volume size. If you want to decrease the size of the ML storage volume in use, create a\n new notebook instance with the desired size.

" } }, "DefaultCodeRepository": { "target": "com.amazonaws.sagemaker#CodeRepositoryNameOrUrl", "traits": { - "smithy.api#documentation": "

The Git repository to associate with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

The Git repository to associate with the notebook instance as its default code\n repository. This can be either the name of a Git repository stored as a resource in your\n account, or the URL of a Git repository in Amazon Web Services CodeCommit\n or in any other Git repository. When you open a notebook instance, it opens in the\n directory that contains this repository. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } }, "AdditionalCodeRepositories": { "target": "com.amazonaws.sagemaker#AdditionalCodeRepositoryNamesOrUrls", "traits": { - "smithy.api#documentation": "

An array of up to three Git repositories to associate with the notebook instance.\n These can be either the names of Git repositories stored as resources in your account,\n or the URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker Notebook Instances.

" + "smithy.api#documentation": "

An array of up to three Git repositories to associate with the notebook instance.\n These can be either the names of Git repositories stored as resources in your account,\n or the URL of Git repositories in Amazon Web Services CodeCommit\n or in any other Git repository. These repositories are cloned at the same level as the\n default repository of your notebook instance. For more information, see Associating Git\n Repositories with SageMaker AI Notebook Instances.

" } }, "AcceleratorTypes": { @@ -74138,7 +74313,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the settings of a space.

" + "smithy.api#documentation": "

Updates the settings of a space.

\n \n

You can't edit the app type of a space in the SpaceSettings.

\n
" } }, "com.amazonaws.sagemaker#UpdateSpaceRequest": { @@ -74822,13 +74997,13 @@ "SecurityGroups": { "target": "com.amazonaws.sagemaker#SecurityGroupIds", "traits": { - "smithy.api#documentation": "

The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for\n communication.

\n

Optional when the CreateDomain.AppNetworkAccessType parameter is set to\n PublicInternetOnly.

\n

Required when the CreateDomain.AppNetworkAccessType parameter is set to\n VpcOnly, unless specified as part of the DefaultUserSettings for\n the domain.

\n

Amazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the\n maximum number shown.

\n

SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" + "smithy.api#documentation": "

The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for\n communication.

\n

Optional when the CreateDomain.AppNetworkAccessType parameter is set to\n PublicInternetOnly.

\n

Required when the CreateDomain.AppNetworkAccessType parameter is set to\n VpcOnly, unless specified as part of the DefaultUserSettings for\n the domain.

\n

Amazon SageMaker AI adds a security group to allow NFS traffic from Amazon SageMaker AI Studio. Therefore, the number of security groups that you can specify is one less than the\n maximum number shown.

\n

SageMaker AI applies these settings only to private spaces that the user creates in the domain. SageMaker AI doesn't apply these settings to shared spaces.

" } }, "SharingSettings": { "target": "com.amazonaws.sagemaker#SharingSettings", "traits": { - "smithy.api#documentation": "

Specifies options for sharing Amazon SageMaker Studio notebooks.

" + "smithy.api#documentation": "

Specifies options for sharing Amazon SageMaker AI Studio notebooks.

" } }, "JupyterServerAppSettings": { @@ -74906,7 +75081,7 @@ "CustomFileSystemConfigs": { "target": "com.amazonaws.sagemaker#CustomFileSystemConfigs", "traits": { - "smithy.api#documentation": "

The settings for assigning a custom file system to a user profile. Permitted users can\n access this file system in Amazon SageMaker Studio.

\n

SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" + "smithy.api#documentation": "

The settings for assigning a custom file system to a user profile. Permitted users can\n access this file system in Amazon SageMaker AI Studio.

\n

SageMaker AI applies these settings only to private spaces that the user creates in the domain. SageMaker AI doesn't apply these settings to shared spaces.

" } }, "StudioWebPortalSettings": { diff --git a/models/securityhub.json b/models/securityhub.json index 86c6b6c2b4..bb61de1e91 100644 --- a/models/securityhub.json +++ b/models/securityhub.json @@ -434,7 +434,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Information about the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" + "smithy.api#documentation": "

\n Information about the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" } }, "com.amazonaws.securityhub#ActorSession": { @@ -466,7 +466,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about the authenticated session used by the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" + "smithy.api#documentation": "

\n Contains information about the authenticated session used by the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" } }, "com.amazonaws.securityhub#ActorSessionMfaStatus": { @@ -521,7 +521,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about the credentials used by the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" + "smithy.api#documentation": "

\n Contains information about the credentials used by the threat actor identified in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" } }, "com.amazonaws.securityhub#ActorsList": { @@ -890,13 +890,13 @@ "CreatedAt": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

\n A timestamp that indicates when the rule was created.\n

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

\n A timestamp that indicates when the rule was created.\n

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "UpdatedAt": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

\n A timestamp that indicates when the rule was most recently updated.\n

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

\n A timestamp that indicates when the rule was most recently updated.\n

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "CreatedBy": { @@ -1005,25 +1005,25 @@ "FirstObservedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

\n A timestamp that indicates when the potential security issue captured by a \n finding was first observed by the security findings product.\n

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 20 items.\n \t

" + "smithy.api#documentation": "

\n A timestamp that indicates when the potential security issue captured by a \n finding was first observed by the security findings product.\n

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 20 items.\n \t

" } }, "LastObservedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

\n A timestamp that indicates when the potential security issue captured by a finding \n was most recently observed by the security findings product.\n

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 20 items.\n \t

" + "smithy.api#documentation": "

\n A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding.\n

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 20 items.\n \t

" } }, "CreatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

\n A timestamp that indicates when this finding record was created.\n

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 20 items.\n \t

" + "smithy.api#documentation": "

\n A timestamp that indicates when this finding record was created.\n

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 20 items.\n \t

" } }, "UpdatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

\n A timestamp that indicates when the finding record was most recently updated. \n

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 20 items.\n \t

" + "smithy.api#documentation": "

\n A timestamp that indicates when the finding record was most recently updated. \n

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 20 items.\n \t

" } }, "Confidence": { @@ -1167,7 +1167,7 @@ "NoteUpdatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

\n The timestamp of when the note was updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 20 items.\n \t

" + "smithy.api#documentation": "

\n The timestamp of when the note was updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 20 items.\n \t

" } }, "NoteUpdatedBy": { @@ -1247,13 +1247,13 @@ "CreatedAt": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

\n A timestamp that indicates when the rule was created.\n

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

\n A timestamp that indicates when the rule was created.\n

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "UpdatedAt": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

\n A timestamp that indicates when the rule was most recently updated.\n

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

\n A timestamp that indicates when the rule was most recently updated.\n

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "CreatedBy": { @@ -1655,13 +1655,13 @@ "FirstSeen": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the API call was first\n observed.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when the API call was first\n observed.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastSeen": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the API call was most recently\n observed.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when the API call was most recently\n observed.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -1859,7 +1859,7 @@ "CreatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the API was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the API was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Version": { @@ -1981,13 +1981,13 @@ "CreatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the stage was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the stage was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastUpdatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the stage was most recently updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the stage was most recently updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "WebAclArn": { @@ -2025,7 +2025,7 @@ "CreatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the API was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the API was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Description": { @@ -2119,7 +2119,7 @@ "CreatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the stage was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the stage was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Description": { @@ -2143,7 +2143,7 @@ "LastUpdatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the stage was most recently updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the stage was most recently updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "RouteSettings": { @@ -2557,7 +2557,7 @@ "CreatedTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the auto scaling group was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the auto scaling group was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "MixedInstancesPolicy": { @@ -2863,7 +2863,7 @@ "CreatedTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The creation date and time for the launch configuration.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The creation date and time for the launch configuration.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "EbsOptimized": { @@ -3457,7 +3457,7 @@ "CreatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the certificate was requested.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the certificate was requested.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DomainName": { @@ -3487,7 +3487,7 @@ "ImportedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the certificate was imported. Provided if the certificate type is\n IMPORTED.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the certificate was imported. Provided if the certificate type is\n IMPORTED.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "InUseBy": { @@ -3499,7 +3499,7 @@ "IssuedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the certificate was issued. Provided if the certificate type is\n AMAZON_ISSUED.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the certificate was issued. Provided if the certificate type is\n AMAZON_ISSUED.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Issuer": { @@ -3523,13 +3523,13 @@ "NotAfter": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The time after which the certificate becomes invalid.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The time after which the certificate becomes invalid.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "NotBefore": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The time before which the certificate is not valid.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The time before which the certificate is not valid.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Options": { @@ -3725,7 +3725,7 @@ "UpdatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the renewal summary was last updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the renewal summary was last updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -3987,7 +3987,7 @@ "LastModifiedTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when that the distribution was last modified.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when that the distribution was last modified.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Logging": { @@ -5311,7 +5311,7 @@ "LastUpdateToPayPerRequestDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

If the billing mode is PAY_PER_REQUEST, indicates when the billing mode was\n set to that value.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

If the billing mode is PAY_PER_REQUEST, indicates when the billing mode was\n set to that value.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -5337,7 +5337,7 @@ "CreationDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the table was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the table was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "GlobalSecondaryIndexes": { @@ -5605,13 +5605,13 @@ "LastDecreaseDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the provisioned throughput was last decreased.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the provisioned throughput was last decreased.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastIncreaseDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the provisioned throughput was last increased.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the provisioned throughput was last increased.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "NumberOfDecreasesToday": { @@ -5745,7 +5745,7 @@ "RestoreDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates the point in time that the table was restored to.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates the point in time that the table was restored to.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "RestoreInProgress": { @@ -5765,7 +5765,7 @@ "InaccessibleEncryptionDateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

If the key is inaccessible, the date and time when DynamoDB detected that the key was\n inaccessible.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

If the key is inaccessible, the date and time when DynamoDB detected that the key was\n inaccessible.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Status": { @@ -6215,7 +6215,7 @@ "LaunchedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the instance was launched.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the instance was launched.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "NetworkInterfaces": { @@ -7685,7 +7685,7 @@ "AttachTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the attachment initiated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the attachment initiated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "AttachmentId": { @@ -8321,7 +8321,7 @@ "CreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the volume was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the volume was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DeviceName": { @@ -8871,7 +8871,7 @@ "LastStatusChange": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The date and time of the last change in status.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The date and time of the last change in status.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "OutsideIpAddress": { @@ -8939,7 +8939,7 @@ "ImagePublishedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The date and time when the image was pushed to the repository.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The date and time when the image was pushed to the repository.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -11948,7 +11948,7 @@ "CreatedTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the load balancer was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the load balancer was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DnsName": { @@ -12240,7 +12240,7 @@ "CreatedTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the load balancer was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the load balancer was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DNSName": { @@ -12785,7 +12785,7 @@ "CreatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the IAM access key was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the IAM access key was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "PrincipalId": { @@ -12861,7 +12861,7 @@ "CreationDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the session was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the session was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -12962,7 +12962,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the IAM group was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the IAM group was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "GroupId": { @@ -13026,7 +13026,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the instance profile was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the instance profile was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "InstanceProfileId": { @@ -13082,7 +13082,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the role was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the role was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Path": { @@ -13146,7 +13146,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

When the policy was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

When the policy was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DefaultVersionId": { @@ -13200,7 +13200,7 @@ "UpdateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

When the policy was most recently updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

When the policy was most recently updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -13226,7 +13226,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the version was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the version was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -13268,7 +13268,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the role was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the role was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "InstanceProfileList": { @@ -13347,7 +13347,7 @@ "CreateDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the user was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the user was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "GroupList": { @@ -13481,7 +13481,7 @@ "CreationDate": { "target": "com.amazonaws.securityhub#Double", "traits": { - "smithy.api#documentation": "

Indicates when the KMS key was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the KMS key was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "KeyId": { @@ -13619,7 +13619,7 @@ "LastModified": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the function was last updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the function was last updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Layers": { @@ -13823,7 +13823,7 @@ "CreatedDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the version was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the version was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -14846,7 +14846,7 @@ "ClusterCreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the DB cluster was created, in Universal Coordinated Time (UTC).

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the DB cluster was created, in Universal Coordinated Time (UTC).

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "EnabledCloudWatchLogsExports": { @@ -15046,7 +15046,7 @@ "SnapshotCreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the snapshot was taken.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the snapshot was taken.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Engine": { @@ -15082,7 +15082,7 @@ "ClusterCreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the DB cluster was created, in Universal Coordinated Time (UTC).

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the DB cluster was created, in Universal Coordinated Time (UTC).

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "MasterUsername": { @@ -15310,7 +15310,7 @@ "InstanceCreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the DB instance was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the DB instance was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "KmsKeyId": { @@ -15424,7 +15424,7 @@ "LatestRestorableTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Specifies the latest time to which a database can be restored with point-in-time\n restore.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Specifies the latest time to which a database can be restored with point-in-time\n restore.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "AutoMinorVersionUpgrade": { @@ -16273,7 +16273,7 @@ "SubscriptionCreationTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The datetime when the event notification subscription was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The datetime when the event notification subscription was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -16461,7 +16461,7 @@ "DeferMaintenanceEndTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The end of the time window for which maintenance was deferred.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The end of the time window for which maintenance was deferred.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DeferMaintenanceIdentifier": { @@ -16473,7 +16473,7 @@ "DeferMaintenanceStartTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The start of the time window for which maintenance was deferred.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The start of the time window for which maintenance was deferred.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -16517,7 +16517,7 @@ "ClusterCreateTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the cluster was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the cluster was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ClusterIdentifier": { @@ -16625,7 +16625,7 @@ "ExpectedNextSnapshotScheduleTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the next snapshot is expected to be taken. The cluster must have a valid\n snapshot schedule and have backups enabled.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the next snapshot is expected to be taken. The cluster must have a valid\n snapshot schedule and have backups enabled.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ExpectedNextSnapshotScheduleTimeStatus": { @@ -16673,7 +16673,7 @@ "NextMaintenanceWindowStartTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates the start of the next maintenance window.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates the start of the next maintenance window.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "NodeType": { @@ -16869,13 +16869,13 @@ "LastFailureTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The last time when logs failed to be delivered.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The last time when logs failed to be delivered.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastSuccessfulDeliveryTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The last time that logs were delivered successfully.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The last time that logs were delivered successfully.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LoggingEnabled": { @@ -17316,7 +17316,7 @@ "ExpirationDate": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The date when objects are moved or deleted.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

The date when objects are moved or deleted.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ExpirationInDays": { @@ -17534,7 +17534,7 @@ "Date": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A date on which to transition objects to the specified storage class. If you provide Date, you cannot provide Days.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A date on which to transition objects to the specified storage class. If you provide Date, you cannot provide Days.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Days": { @@ -17604,7 +17604,7 @@ "CreatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the S3 bucket was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the S3 bucket was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ServerSideEncryptionConfiguration": { @@ -18077,7 +18077,7 @@ "LastModified": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the object was last modified.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the object was last modified.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ETag": { @@ -18127,19 +18127,19 @@ "AdditionalCodeRepositories": { "target": "com.amazonaws.securityhub#NonEmptyStringList", "traits": { - "smithy.api#documentation": "

\n An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in CodeCommit or in any other Git repository. \n These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git repositories with SageMaker notebook instances in the Amazon SageMaker Developer Guide.\n

" + "smithy.api#documentation": "

\n An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in CodeCommit or in any other Git repository. \n These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git repositories with SageMaker AI notebook instances in the Amazon SageMaker AI Developer Guide.\n

" } }, "DefaultCodeRepository": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in CodeCommit or in any other Git repository. \n When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git repositories with SageMaker notebook instances in the Amazon SageMaker Developer Guide.\n

" + "smithy.api#documentation": "

\n The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in CodeCommit or in any other Git repository. \n When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git repositories with SageMaker AI notebook instances in the Amazon SageMaker AI Developer Guide.\n

" } }, "DirectInternetAccess": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n Sets whether SageMaker provides internet access to the notebook instance. If you set this to Disabled, this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a Network Address Translation (NAT) Gateway in your VPC.\n

" + "smithy.api#documentation": "

\n Sets whether SageMaker AI provides internet access to the notebook instance. If you set this to Disabled, this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker AI training and endpoint services unless you configure a Network Address Translation (NAT) Gateway in your VPC.\n

" } }, "FailureReason": { @@ -18163,13 +18163,13 @@ "KmsKeyId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of an Key Management Service (KMS) key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see \n Enabling and disabling keys in the Key Management Service Developer Guide.\n

" + "smithy.api#documentation": "

\n The Amazon Resource Name (ARN) of an Key Management Service (KMS) key that SageMaker AI uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see \n Enabling and disabling keys in the Key Management Service Developer Guide.\n

" } }, "NetworkInterfaceId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n The network interface ID that SageMaker created when the instance was created.\n

" + "smithy.api#documentation": "

\n The network interface ID that SageMaker AI created when the instance was created.\n

" } }, "NotebookInstanceArn": { @@ -18240,7 +18240,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Provides details about an Amazon SageMaker notebook instance.\n

" + "smithy.api#documentation": "

\n Provides details about an Amazon SageMaker AI notebook instance.\n

" } }, "com.amazonaws.securityhub#AwsSageMakerNotebookInstanceMetadataServiceConfigurationDetails": { @@ -18397,20 +18397,20 @@ "FirstObservedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the security findings provider first observed the potential security\n issue that a finding captured.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the security findings provider first observed the potential security\n issue that a finding captured.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastObservedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the security findings provider most recently observed the potential\n security issue that a finding captured.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the security findings provider most recently observed a change in the resource that is involved in the finding.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "CreatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Indicates when the security findings provider created the potential security issue that\n a finding captured.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
", + "smithy.api#documentation": "

Indicates when the security findings provider created the potential security issue that\n a finding captured.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

", "smithy.api#required": {} } }, @@ -18418,7 +18418,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Indicates when the security findings provider last updated the finding record.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
", + "smithy.api#documentation": "

Indicates when the security findings provider last updated the finding record.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

", "smithy.api#required": {} } }, @@ -18605,7 +18605,7 @@ "ProcessedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when Security Hub received a finding and begins to process it.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when Security Hub received a finding and begins to process it.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "AwsAccountName": { @@ -18617,7 +18617,7 @@ "Detection": { "target": "com.amazonaws.securityhub#Detection", "traits": { - "smithy.api#documentation": "

\n Provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" + "smithy.api#documentation": "

\n Provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" } } }, @@ -18667,25 +18667,25 @@ "FirstObservedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the security findings provider first\n observed the potential security issue that a finding captured.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when the security findings provider first\n observed the potential security issue that a finding captured.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "LastObservedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the security findings provider most\n recently observed the potential security issue that a finding captured.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when the security findings provider most recently observed a change in the resource that is involved in the finding.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "CreatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the security findings provider\n created the potential security issue that a finding reflects.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when the security findings provider\n created the potential security issue that a finding reflects.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "UpdatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when the security findings provider last\n updated the finding record.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when the security findings provider last\n updated the finding record.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "SeverityProduct": { @@ -18889,13 +18889,13 @@ "ProcessLaunchedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that identifies when the process was launched.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that identifies when the process was launched.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ProcessTerminatedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that identifies when the process was terminated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that identifies when the process was terminated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ThreatIntelIndicatorType": { @@ -18919,7 +18919,7 @@ "ThreatIntelIndicatorLastObservedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that identifies the last observation of a threat intelligence indicator.

" + "smithy.api#documentation": "

A timestamp that identifies the last observation of a threat intelligence indicator.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ThreatIntelIndicatorSource": { @@ -19084,7 +19084,7 @@ "ResourceContainerLaunchedAt": { "target": "com.amazonaws.securityhub#DateFilterList", "traits": { - "smithy.api#documentation": "

A timestamp that identifies when the container was started.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that identifies when the container was started.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "ResourceDetailsOther": { @@ -22636,7 +22636,7 @@ "LaunchedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the container started.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the container started.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "VolumeMounts": { @@ -23628,13 +23628,13 @@ "Start": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A timestamp that provides the start date for the date filter.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that provides the start date for the date filter.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "End": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A timestamp that provides the end date for the date filter.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that provides the end date for the date filter.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "DateRange": { @@ -24399,7 +24399,7 @@ "AutoEnableControls": { "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "

Whether to automatically enable new controls when they are added to standards that are\n enabled.

\n

If set to true, then new controls for enabled standards are enabled\n automatically. If set to false, then new controls are not enabled.

" + "smithy.api#documentation": "

Whether to automatically enable new controls when they are added to standards that are\n enabled.

\n

If set to true, then new controls for enabled standards are enabled\n automatically. If set to false, then new controls are not enabled.

\n

When you automatically enable new controls, you can interact with the controls in \n the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of \n DISABLED. It can take up to several days for Security Hub to process the control release and designate the \n control as ENABLED in your account. During the processing period, you can manually enable or disable a \n control, and Security Hub will maintain that designation regardless of whether you have AutoEnableControls set to \n true.

" } }, "ControlFindingGenerator": { @@ -24826,7 +24826,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n A top-level object field that provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" + "smithy.api#documentation": "

\n A top-level object field that provides details about an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

" } }, "com.amazonaws.securityhub#DisableImportFindingsForProduct": { @@ -25608,7 +25608,7 @@ "UpdateTime": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

A timestamp that indicates when Security Hub \n processed the updated finding record.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates when Security Hub \n processed the updated finding record.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "FindingCreated": { @@ -26508,13 +26508,13 @@ "StartTime": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

A timestamp that indicates the start time of the requested finding history.

\n

If you provide values for both StartTime and EndTime,\n Security Hub returns finding history for the specified time period. If you\n provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at\n which the API is called. If you provide a value for EndTime but not for\n StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you\n provide neither StartTime nor EndTime, Security Hub\n returns finding history from the CreatedAt timestamp of the finding to the time at which\n the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is \n limited to 90 days.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

A timestamp that indicates the start time of the requested finding history.

\n

If you provide values for both StartTime and EndTime,\n Security Hub returns finding history for the specified time period. If you\n provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at\n which the API is called. If you provide a value for EndTime but not for\n StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you\n provide neither StartTime nor EndTime, Security Hub\n returns finding history from the CreatedAt timestamp of the finding to the time at which\n the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is \n limited to 90 days.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "EndTime": { "target": "com.amazonaws.securityhub#Timestamp", "traits": { - "smithy.api#documentation": "

\n An ISO 8601-formatted timestamp that indicates the end time of the requested finding history.

\n

If you provide values for both StartTime and EndTime,\n Security Hub returns finding history for the specified time period. If you\n provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at\n which the API is called. If you provide a value for EndTime but not for\n StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you\n provide neither StartTime nor EndTime, Security Hub\n returns finding history from the CreatedAt timestamp of the finding to the time at which\n the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is \n limited to 90 days.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

\n An ISO 8601-formatted timestamp that indicates the end time of the requested finding history.

\n

If you provide values for both StartTime and EndTime,\n Security Hub returns finding history for the specified time period. If you\n provide a value for StartTime but not for EndTime, Security Hub returns finding history from the StartTime to the time at\n which the API is called. If you provide a value for EndTime but not for\n StartTime, Security Hub returns finding history from the CreatedAt timestamp of the finding to the EndTime. If you\n provide neither StartTime nor EndTime, Security Hub\n returns finding history from the CreatedAt timestamp of the finding to the time at which\n the API is called. In all of these scenarios, the response is limited to 100 results, and the maximum time period is \n limited to 90 days.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "NextToken": { @@ -29317,7 +29317,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about the Autonomous System (AS) of the network \n endpoints involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" + "smithy.api#documentation": "

\n Contains information about the Autonomous System (AS) of the network \n endpoints involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" } }, "com.amazonaws.securityhub#NetworkConnection": { @@ -29331,7 +29331,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about the network connection involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" + "smithy.api#documentation": "

\n Contains information about the network connection involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" } }, "com.amazonaws.securityhub#NetworkConnectionAction": { @@ -29442,7 +29442,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about network endpoints involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

\n

This field can provide information about the network endpoints associated with the resource in the attack sequence finding, \nor about a specific network endpoint used for the attack.

" + "smithy.api#documentation": "

\n Contains information about network endpoints involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

\n

This field can provide information about the network endpoints associated with the resource in the attack sequence finding, \nor about a specific network endpoint used for the attack.

" } }, "com.amazonaws.securityhub#NetworkEndpointsList": { @@ -29486,7 +29486,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about the location of a network endpoint involved in an Amazon GuardDuty Extended Threat Detection attack sequence. \nGuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" + "smithy.api#documentation": "

\n Contains information about the location of a network endpoint involved in an Amazon GuardDuty Extended Threat Detection attack sequence. \nGuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" } }, "com.amazonaws.securityhub#NetworkHeader": { @@ -29611,7 +29611,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A timestamp that indicates when the note was updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
", + "smithy.api#documentation": "

A timestamp that indicates when the note was updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

", "smithy.api#required": {} } } @@ -30034,13 +30034,13 @@ "OperationStartTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the operation started.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the operation started.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "OperationEndTime": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the operation completed.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the operation completed.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "RebootOption": { @@ -30202,13 +30202,13 @@ "LaunchedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the process was launched.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the process was launched.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "TerminatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the process was terminated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the process was terminated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, @@ -33480,7 +33480,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Contains information about an Amazon GuardDuty Extended Threat Detection attack sequence finding. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" + "smithy.api#documentation": "

\n Contains information about an Amazon GuardDuty Extended Threat Detection attack sequence finding. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.\n

" } }, "com.amazonaws.securityhub#Severity": { @@ -33677,7 +33677,7 @@ "Severity": { "target": "com.amazonaws.securityhub#Double", "traits": { - "smithy.api#documentation": "

The severity associated with the signal. For more information about severity, see \n Findings severity levels\n in the Amazon GuardDuty User Guide.

" + "smithy.api#documentation": "

The severity associated with the signal. For more information about severity, see \n Severity levels for GuardDuty findings\n in the Amazon GuardDuty User Guide.

" } }, "Count": { @@ -35032,7 +35032,7 @@ "LastObservedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the most recent instance of a threat intelligence indicator was\n observed.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the most recent instance of a threat intelligence indicator was\n observed.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "Source": { @@ -36338,7 +36338,7 @@ "AutoEnableControls": { "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "

Whether to automatically enable new controls when they are added to standards that are\n enabled.

\n

By default, this is set to true, and new controls are enabled\n automatically. To not automatically enable new controls, set this to false.\n

" + "smithy.api#documentation": "

Whether to automatically enable new controls when they are added to standards that are\n enabled.

\n

By default, this is set to true, and new controls are enabled\n automatically. To not automatically enable new controls, set this to false.\n

\n

When you automatically enable new controls, you can interact with the controls in \n the console and programmatically immediately after release. However, automatically enabled controls have a temporary default status of \n DISABLED. It can take up to several days for Security Hub to process the control release and designate the \n control as ENABLED in your account. During the processing period, you can manually enable or disable a \n control, and Security Hub will maintain that designation regardless of whether you have AutoEnableControls set to \n true.

" } }, "ControlFindingGenerator": { @@ -36473,7 +36473,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Provides Amazon Web Services account information of the user involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty and GuardDuty S3 Protection enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide. \n

" + "smithy.api#documentation": "

\n Provides Amazon Web Services account information of the user involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack \n sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you \n\t\t\t\tmust have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide. \n

" } }, "com.amazonaws.securityhub#VerificationState": { @@ -36777,13 +36777,13 @@ "VendorCreatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the vulnerability advisory was created.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the vulnerability advisory was created.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } }, "VendorUpdatedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Indicates when the vulnerability advisory was last updated.

\n

This field accepts only the specified formats. Timestamps \ncan end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited \nto a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

\n
    \n
  • \n

    \n YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)

    \n
  • \n
  • \n

    \n YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)

    \n
  • \n
" + "smithy.api#documentation": "

Indicates when the vulnerability advisory was last updated.

\n

For more information about the validation and formatting of timestamp fields in Security Hub, see Timestamps.

" } } }, diff --git a/models/ssm-sap.json b/models/ssm-sap.json index fb223f7620..e437809fe8 100644 --- a/models/ssm-sap.json +++ b/models/ssm-sap.json @@ -572,6 +572,47 @@ "target": "com.amazonaws.ssmsap#ComponentId" } }, + "com.amazonaws.ssmsap#ComponentInfo": { + "type": "structure", + "members": { + "ComponentType": { + "target": "com.amazonaws.ssmsap#ComponentType", + "traits": { + "smithy.api#documentation": "

This string is the type of the component.

\n

Accepted value is WD.

", + "smithy.api#required": {} + } + }, + "Sid": { + "target": "com.amazonaws.ssmsap#SID", + "traits": { + "smithy.api#documentation": "

This string is the SAP System ID of the component.

\n

Accepted values are alphanumeric.

", + "smithy.api#required": {} + } + }, + "Ec2InstanceId": { + "target": "com.amazonaws.ssmsap#InstanceId", + "traits": { + "smithy.api#documentation": "

This is the Amazon EC2 instance on which your SAP component is running.

\n

Accepted values are alphanumeric.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This is information about the component of \n your SAP application, such as Web Dispatcher.

" + } + }, + "com.amazonaws.ssmsap#ComponentInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmsap#ComponentInfo" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, "com.amazonaws.ssmsap#ComponentStatus": { "type": "enum", "members": { @@ -2518,6 +2559,12 @@ "traits": { "smithy.api#documentation": "

The Amazon Resource Name of the SAP HANA database.

" } + }, + "ComponentsInfo": { + "target": "com.amazonaws.ssmsap#ComponentInfoList", + "traits": { + "smithy.api#documentation": "

This is an optional parameter for component details \n to which the SAP ABAP application is attached, \n such as Web Dispatcher.

\n

This is an array of ApplicationComponent objects. \n You may input 0 to 5 items.

" + } } } }, diff --git a/models/transfer.json b/models/transfer.json index fec686b645..9ed633734e 100644 --- a/models/transfer.json +++ b/models/transfer.json @@ -180,6 +180,12 @@ "traits": { "smithy.api#documentation": "

Provides Basic authentication support to the AS2 Connectors API. To use Basic authentication,\n you must provide the name or Amazon Resource Name (ARN) of a secret in Secrets Manager.

\n

The default value for this parameter is null, which indicates that Basic authentication is not enabled for the connector.

\n

If the connector should use Basic authentication, the secret needs to be in the following format:

\n

\n {\n \"Username\": \"user-name\",\n \"Password\": \"user-password\"\n }\n

\n

Replace user-name and user-password with the credentials for the actual user that is being authenticated.

\n

Note the following:

\n
    \n
  • \n

    You are storing these credentials in Secrets Manager, not passing them directly into this API.

    \n
  • \n
  • \n

    If you are using the API, SDKs, or CloudFormation to configure your connector, then you must create the secret before you can enable Basic authentication.\n However, if you are using the Amazon Web Services management console, you can have the system create the secret for you.

    \n
  • \n
\n

If you have previously enabled Basic authentication for a connector, you can disable it by using the UpdateConnector API call. For example, if you are using the CLI, you can run the following command to remove Basic authentication:

\n

\n update-connector --connector-id my-connector-id --as2-config 'BasicAuthSecretId=\"\"'\n

" } + }, + "PreserveContentType": { + "target": "com.amazonaws.transfer#PreserveContentType", + "traits": { + "smithy.api#documentation": "

Allows you to use the Amazon S3 Content-Type that is associated with objects in S3 instead of\n having the content type mapped based on the file extension. This parameter is enabled by default when you create an AS2 connector\n from the console, but disabled by default when you create an AS2 connector by calling the API directly.

" + } } }, "traits": { @@ -766,6 +772,18 @@ "traits": { "smithy.api#documentation": "

Key-value pairs that can be used to group and search for agreements.

" } + }, + "PreserveFilename": { + "target": "com.amazonaws.transfer#PreserveFilenameType", + "traits": { + "smithy.api#documentation": "

\n Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload\n filename when saving it.\n

\n
    \n
  • \n

    \n ENABLED: the filename provided by your trading partner is preserved when the file is saved.

    \n
  • \n
  • \n

    \n DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as\n described in File names and locations.

    \n
  • \n
" + } + }, + "EnforceMessageSigning": { + "target": "com.amazonaws.transfer#EnforceMessageSigningType", + "traits": { + "smithy.api#documentation": "

\n Determines whether or not unsigned messages from your trading partners will be accepted.\n

\n
    \n
  • \n

    \n ENABLED: Transfer Family rejects unsigned messages from your trading partner.

    \n
  • \n
  • \n

    \n DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.

    \n
  • \n
" + } } }, "traits": { @@ -3218,6 +3236,18 @@ "traits": { "smithy.api#documentation": "

Key-value pairs that can be used to group and search for agreements.

" } + }, + "PreserveFilename": { + "target": "com.amazonaws.transfer#PreserveFilenameType", + "traits": { + "smithy.api#documentation": "

\n Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload\n filename when saving it.\n

\n
    \n
  • \n

    \n ENABLED: the filename provided by your trading partner is preserved when the file is saved.

    \n
  • \n
  • \n

    \n DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as\n described in File names and locations.

    \n
  • \n
" + } + }, + "EnforceMessageSigning": { + "target": "com.amazonaws.transfer#EnforceMessageSigningType", + "traits": { + "smithy.api#documentation": "

\n Determines whether or not unsigned messages from your trading partners will be accepted.\n

\n
    \n
  • \n

    \n ENABLED: Transfer Family rejects unsigned messages from your trading partner.

    \n
  • \n
  • \n

    \n DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.

    \n
  • \n
" + } } }, "traits": { @@ -3250,7 +3280,7 @@ "Status": { "target": "com.amazonaws.transfer#CertificateStatusType", "traits": { - "smithy.api#documentation": "

The certificate can be either ACTIVE, PENDING_ROTATION, or\n INACTIVE. PENDING_ROTATION means that this certificate will\n replace the current certificate when it expires.

" + "smithy.api#documentation": "

Currently, the only available status is ACTIVE: all other values are reserved for future use.

" } }, "Certificate": { @@ -4227,6 +4257,23 @@ } } }, + "com.amazonaws.transfer#EnforceMessageSigningType": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.transfer#ExecutionError": { "type": "structure", "members": { @@ -7017,6 +7064,40 @@ "smithy.api#pattern": "^[\\x09-\\x0D\\x20-\\x7E]*$" } }, + "com.amazonaws.transfer#PreserveContentType": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.transfer#PreserveFilenameType": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.transfer#PrivateKeyType": { "type": "string", "traits": { @@ -9988,6 +10069,18 @@ "traits": { "smithy.api#documentation": "

Connectors are used to send files using either the AS2 or SFTP protocol. For the access role,\n provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.

\n

\n For AS2 connectors\n

\n

With AS2, you can send files by calling StartFileTransfer and specifying the\n file paths in the request parameter, SendFilePaths. We use the file’s parent\n directory (for example, for --send-file-paths /bucket/dir/file.txt, parent\n directory is /bucket/dir/) to temporarily store a processed AS2 message file,\n store the MDN when we receive them from the partner, and write a final JSON file containing\n relevant metadata of the transmission. So, the AccessRole needs to provide read\n and write access to the parent directory of the file location used in the\n StartFileTransfer request. Additionally, you need to provide read and write\n access to the parent directory of the files that you intend to send with\n StartFileTransfer.

\n

If you are using Basic authentication for your AS2 connector, the access role requires the\n secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using\n a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also\n needs the kms:Decrypt permission for that key.

\n

\n For SFTP connectors\n

\n

Make sure that the access role provides\n read and write access to the parent directory of the file location\n that's used in the StartFileTransfer request.\n Additionally, make sure that the role provides\n secretsmanager:GetSecretValue permission to Secrets Manager.

" } + }, + "PreserveFilename": { + "target": "com.amazonaws.transfer#PreserveFilenameType", + "traits": { + "smithy.api#documentation": "

\n Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload\n filename when saving it.\n

\n
    \n
  • \n

    \n ENABLED: the filename provided by your trading partner is preserved when the file is saved.

    \n
  • \n
  • \n

    \n DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as\n described in File names and locations.

    \n
  • \n
" + } + }, + "EnforceMessageSigning": { + "target": "com.amazonaws.transfer#EnforceMessageSigningType", + "traits": { + "smithy.api#documentation": "

\n Determines whether or not unsigned messages from your trading partners will be accepted.\n

\n
    \n
  • \n

    \n ENABLED: Transfer Family rejects unsigned messages from your trading partner.

    \n
  • \n
  • \n

    \n DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.

    \n
  • \n
" + } } }, "traits": { diff --git a/models/workspaces.json b/models/workspaces.json index 8d64aa31b1..890c67e04c 100644 --- a/models/workspaces.json +++ b/models/workspaces.json @@ -29,6 +29,86 @@ ] }, "shapes": { + "com.amazonaws.workspaces#AGAModeForDirectoryEnum": { + "type": "enum", + "members": { + "ENABLED_AUTO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED_AUTO" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.workspaces#AGAModeForWorkSpaceEnum": { + "type": "enum", + "members": { + "ENABLED_AUTO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED_AUTO" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "INHERITED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INHERITED" + } + } + } + }, + "com.amazonaws.workspaces#AGAPreferredProtocolForDirectory": { + "type": "enum", + "members": { + "TCP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TCP" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, + "com.amazonaws.workspaces#AGAPreferredProtocolForWorkSpace": { + "type": "enum", + "members": { + "TCP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TCP" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + }, + "INHERITED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INHERITED" + } + } + } + }, "com.amazonaws.workspaces#ARN": { "type": "string", "traits": { @@ -5691,6 +5771,48 @@ "smithy.api#output": {} } }, + "com.amazonaws.workspaces#GlobalAcceleratorForDirectory": { + "type": "structure", + "members": { + "Mode": { + "target": "com.amazonaws.workspaces#AGAModeForDirectoryEnum", + "traits": { + 
"smithy.api#documentation": "

Indicates if Global Accelerator for directory is enabled or disabled.

", + "smithy.api#required": {} + } + }, + "PreferredProtocol": { + "target": "com.amazonaws.workspaces#AGAPreferredProtocolForDirectory", + "traits": { + "smithy.api#documentation": "

Indicates the preferred protocol for Global Accelerator.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

 Describes the Global Accelerator for the directory.

" + } + }, + "com.amazonaws.workspaces#GlobalAcceleratorForWorkSpace": { + "type": "structure", + "members": { + "Mode": { + "target": "com.amazonaws.workspaces#AGAModeForWorkSpaceEnum", + "traits": { + "smithy.api#documentation": "

Indicates if Global Accelerator for WorkSpaces is enabled, disabled, \n or the same mode as the associated directory.

", + "smithy.api#required": {} + } + }, + "PreferredProtocol": { + "target": "com.amazonaws.workspaces#AGAPreferredProtocolForWorkSpace", + "traits": { + "smithy.api#documentation": "

Indicates the preferred protocol for Global Accelerator.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the Global Accelerator for WorkSpaces.

" + } + }, "com.amazonaws.workspaces#IDCConfig": { "type": "structure", "members": { @@ -8813,6 +8935,12 @@ "traits": { "smithy.api#documentation": "

Indicates the storage connector used

" } + }, + "GlobalAccelerator": { + "target": "com.amazonaws.workspaces#GlobalAcceleratorForDirectory", + "traits": { + "smithy.api#documentation": "

Indicates the Global Accelerator properties.

" + } } }, "traits": { @@ -11133,6 +11261,12 @@ "traits": { "smithy.api#documentation": "

The name of the operating system.

" } + }, + "GlobalAccelerator": { + "target": "com.amazonaws.workspaces#GlobalAcceleratorForWorkSpace", + "traits": { + "smithy.api#documentation": "

Indicates the Global Accelerator properties.

" + } } }, "traits": {