//===----------------------------------------------------------------------===//
//
// This source file is part of the Soto for AWS open source project
//
// Copyright (c) 2017-2024 the Soto project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of Soto project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator.
// DO NOT EDIT.

#if canImport(FoundationEssentials)
import FoundationEssentials
#else
import Foundation
#endif
@_exported import SotoCore

/// Service object for interacting with AWS CleanRoomsML service.
///
/// Welcome to the Amazon Web Services Clean Rooms ML API Reference. Amazon Web Services Clean Rooms ML provides a privacy-enhancing method for two parties to identify similar users in their data without the need to share their data with each other. The first party brings the training data to Clean Rooms so that they can create and configure an audience model (lookalike model) and associate it with a collaboration. The second party then brings their seed data to Clean Rooms and generates an audience (lookalike segment) that resembles the training data. To learn more about Amazon Web Services Clean Rooms ML concepts, procedures, and best practices, see the Clean Rooms User Guide. To learn more about SQL commands, functions, and conditions supported in Clean Rooms, see the Clean Rooms SQL Reference.
public struct CleanRoomsML: AWSService {
    // MARK: Member variables

    /// Client used for communication with AWS
    public let client: AWSClient
    /// Service configuration
    public let config: AWSServiceConfig

    // MARK: Initialization

    /// Initialize the CleanRoomsML client.
    /// - parameters:
    ///     - client: AWSClient used to process requests
    ///     - region: Region of server you want to communicate with. This will override the partition parameter.
    ///     - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov).
    ///     - endpoint: Custom endpoint URL to use instead of standard AWS servers
    ///     - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded
    ///     - timeout: Timeout value for HTTP requests
    ///     - byteBufferAllocator: Allocator for ByteBuffers
    ///     - options: Service options
    public init(
        client: AWSClient,
        region: SotoCore.Region? = nil,
        partition: AWSPartition = .aws,
        endpoint: String? = nil,
        middleware: AWSMiddlewareProtocol? = nil,
        timeout: TimeAmount? = nil,
        byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(),
        options: AWSServiceConfig.Options = []
    ) {
        // An explicit region determines the partition; otherwise use the
        // partition the caller asked for.
        let resolvedPartition = region?.partition ?? partition
        self.client = client
        self.config = AWSServiceConfig(
            region: region,
            partition: resolvedPartition,
            serviceName: "CleanRoomsML",
            serviceIdentifier: "cleanrooms-ml",
            serviceProtocol: .restjson,
            apiVersion: "2023-09-06",
            endpoint: endpoint,
            errorType: CleanRoomsMLErrorType.self,
            middleware: middleware,
            timeout: timeout,
            byteBufferAllocator: byteBufferAllocator,
            options: options
        )
    }





    // MARK: API Calls

    /// Submits a request to cancel the trained model job.
    ///
    /// - Parameters:
    ///   - input: Request identifying the trained model job to cancel.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func cancelTrainedModel(_ input: CancelTrainedModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // PATCH on the trained-model resource performs the cancellation.
        return try await self.client.execute(
            operation: "CancelTrainedModel",
            path: "/memberships/{membershipIdentifier}/trained-models/{trainedModelArn}",
            httpMethod: .PATCH,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Submits a request to cancel the trained model job.
    ///
    /// - Parameters:
    ///   - membershipIdentifier: The membership ID of the trained model job that you want to cancel.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model job that you want to cancel.
    ///   - versionIdentifier: The version identifier of the trained model to cancel when multiple versions exist. If not specified, the base model is cancelled.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func cancelTrainedModel(
        membershipIdentifier: String,
        trainedModelArn: String,
        versionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Build the request inline and delegate to the request-based overload.
        try await self.cancelTrainedModel(
            CancelTrainedModelRequest(
                membershipIdentifier: membershipIdentifier,
                trainedModelArn: trainedModelArn,
                versionIdentifier: versionIdentifier
            ),
            logger: logger
        )
    }

    /// Submits a request to cancel a trained model inference job.
    ///
    /// - Parameters:
    ///   - input: Request identifying the inference job to cancel.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func cancelTrainedModelInferenceJob(_ input: CancelTrainedModelInferenceJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // PATCH on the inference-job resource performs the cancellation.
        return try await self.client.execute(
            operation: "CancelTrainedModelInferenceJob",
            path: "/memberships/{membershipIdentifier}/trained-model-inference-jobs/{trainedModelInferenceJobArn}",
            httpMethod: .PATCH,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Submits a request to cancel a trained model inference job.
    ///
    /// - Parameters:
    ///   - membershipIdentifier: The membership ID of the trained model inference job that you want to cancel.
    ///   - trainedModelInferenceJobArn: The Amazon Resource Name (ARN) of the trained model inference job that you want to cancel.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func cancelTrainedModelInferenceJob(
        membershipIdentifier: String,
        trainedModelInferenceJobArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Build the request inline and delegate to the request-based overload.
        try await self.cancelTrainedModelInferenceJob(
            CancelTrainedModelInferenceJobRequest(
                membershipIdentifier: membershipIdentifier,
                trainedModelInferenceJobArn: trainedModelInferenceJobArn
            ),
            logger: logger
        )
    }

    /// Defines the information necessary to create an audience model. An audience model is a machine learning model that Clean Rooms ML trains to measure similarity between users. Clean Rooms ML manages training and storing the audience model. The audience model can be used in multiple calls to the StartAudienceGenerationJob API.
    ///
    /// - Parameters:
    ///   - input: Request describing the audience model to create.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createAudienceModel(_ input: CreateAudienceModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAudienceModelResponse {
        return try await self.client.execute(
            operation: "CreateAudienceModel",
            path: "/audience-model",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Defines the information necessary to create an audience model. An audience model is a machine learning model that Clean Rooms ML trains to measure similarity between users. Clean Rooms ML manages training and storing the audience model. The audience model can be used in multiple calls to the StartAudienceGenerationJob API.
    ///
    /// - Parameters:
    ///   - description: The description of the audience model.
    ///   - kmsKeyArn: The Amazon Resource Name (ARN) of the KMS key used to encrypt and decrypt customer-owned data in the trained ML model and the associated data.
    ///   - name: The name of the audience model resource.
    ///   - tags: Optional metadata tags (maximum 50 per resource; keys up to 128 and values up to 256 UTF-8 characters; keys and values are case sensitive; the aws: prefix is reserved for AWS use).
    ///   - trainingDataEndTime: The end date and time of the training window.
    ///   - trainingDatasetArn: The Amazon Resource Name (ARN) of the training dataset for this audience model.
    ///   - trainingDataStartTime: The start date and time of the training window.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createAudienceModel(
        description: String? = nil,
        kmsKeyArn: String? = nil,
        name: String,
        tags: [String: String]? = nil,
        trainingDataEndTime: Date? = nil,
        trainingDatasetArn: String,
        trainingDataStartTime: Date? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateAudienceModelResponse {
        // Build the request inline and delegate to the request-based overload.
        try await self.createAudienceModel(
            CreateAudienceModelRequest(
                description: description,
                kmsKeyArn: kmsKeyArn,
                name: name,
                tags: tags,
                trainingDataEndTime: trainingDataEndTime,
                trainingDatasetArn: trainingDatasetArn,
                trainingDataStartTime: trainingDataStartTime
            ),
            logger: logger
        )
    }

    /// Defines the information necessary to create a configured audience model.
    ///
    /// - Parameters:
    ///   - input: Request describing the configured audience model to create.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createConfiguredAudienceModel(_ input: CreateConfiguredAudienceModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateConfiguredAudienceModelResponse {
        return try await self.client.execute(
            operation: "CreateConfiguredAudienceModel",
            path: "/configured-audience-model",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Defines the information necessary to create a configured audience model.
    ///
    /// - Parameters:
    ///   - audienceModelArn: The Amazon Resource Name (ARN) of the audience model to use for the configured audience model.
    ///   - audienceSizeConfig: The list of output sizes of audiences that can be created using this configured audience model. A StartAudienceGenerationJob request using this configured audience model must pick an audienceSize from this list. Use ABSOLUTE for identifier counts or Percentage for sizes in the range 1-100 percent.
    ///   - childResourceTagOnCreatePolicy: How the service tags audience generation jobs created with this configured audience model. NONE takes tags from the StartAudienceGenerationJob request; FROM_PARENT_RESOURCE inherits tags from the configured audience model by default (request tags override). Cross-account client tags are never applied to resources in the caller's account.
    ///   - description: The description of the configured audience model.
    ///   - minMatchingSeedSize: The minimum number of users from the seed audience that must match with users in the training data of the audience model. The default value is 500.
    ///   - name: The name of the configured audience model.
    ///   - outputConfig: The Amazon S3 location and IAM Role for audiences created using this configured audience model. Each audience will have a unique location. The IAM Role must have s3:PutObject permission on the destination Amazon S3 location, plus KMS permissions if the destination uses Amazon S3 KMS-SSE.
    ///   - sharedAudienceMetrics: Whether audience metrics are shared.
    ///   - tags: Optional metadata tags (maximum 50 per resource; keys up to 128 and values up to 256 UTF-8 characters; keys and values are case sensitive; the aws: prefix is reserved for AWS use).
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createConfiguredAudienceModel(
        audienceModelArn: String,
        audienceSizeConfig: AudienceSizeConfig? = nil,
        childResourceTagOnCreatePolicy: TagOnCreatePolicy? = nil,
        description: String? = nil,
        minMatchingSeedSize: Int? = nil,
        name: String,
        outputConfig: ConfiguredAudienceModelOutputConfig,
        sharedAudienceMetrics: [SharedAudienceMetrics],
        tags: [String: String]? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateConfiguredAudienceModelResponse {
        // Build the request inline and delegate to the request-based overload.
        try await self.createConfiguredAudienceModel(
            CreateConfiguredAudienceModelRequest(
                audienceModelArn: audienceModelArn,
                audienceSizeConfig: audienceSizeConfig,
                childResourceTagOnCreatePolicy: childResourceTagOnCreatePolicy,
                description: description,
                minMatchingSeedSize: minMatchingSeedSize,
                name: name,
                outputConfig: outputConfig,
                sharedAudienceMetrics: sharedAudienceMetrics,
                tags: tags
            ),
            logger: logger
        )
    }

    /// Creates a configured model algorithm using a container image stored in an ECR repository.
    ///
    /// - Parameters:
    ///   - input: Request describing the configured model algorithm to create.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createConfiguredModelAlgorithm(_ input: CreateConfiguredModelAlgorithmRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateConfiguredModelAlgorithmResponse {
        return try await self.client.execute(
            operation: "CreateConfiguredModelAlgorithm",
            path: "/configured-model-algorithms",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Creates a configured model algorithm using a container image stored in an ECR repository.
    ///
    /// - Parameters:
    ///   - description: The description of the configured model algorithm.
    ///   - inferenceContainerConfig: Configuration information for the inference container that is used when you run an inference job on a configured model algorithm.
    ///   - kmsKeyArn: The Amazon Resource Name (ARN) of the KMS key used to encrypt and decrypt customer-owned data in the configured ML model algorithm and associated data.
    ///   - name: The name of the configured model algorithm.
    ///   - roleArn: The Amazon Resource Name (ARN) of the role that is used to access the repository.
    ///   - tags: Optional metadata tags (maximum 50 per resource; keys up to 128 and values up to 256 UTF-8 characters; keys and values are case sensitive; the aws: prefix is reserved for AWS use).
    ///   - trainingContainerConfig: Configuration information for the training container, including entrypoints and arguments.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createConfiguredModelAlgorithm(
        description: String? = nil,
        inferenceContainerConfig: InferenceContainerConfig? = nil,
        kmsKeyArn: String? = nil,
        name: String,
        roleArn: String,
        tags: [String: String]? = nil,
        trainingContainerConfig: ContainerConfig? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateConfiguredModelAlgorithmResponse {
        // Build the request inline and delegate to the request-based overload.
        try await self.createConfiguredModelAlgorithm(
            CreateConfiguredModelAlgorithmRequest(
                description: description,
                inferenceContainerConfig: inferenceContainerConfig,
                kmsKeyArn: kmsKeyArn,
                name: name,
                roleArn: roleArn,
                tags: tags,
                trainingContainerConfig: trainingContainerConfig
            ),
            logger: logger
        )
    }

    /// Associates a configured model algorithm to a collaboration for use by any member of the collaboration.
    ///
    /// - Parameters:
    ///   - input: Request describing the association to create.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createConfiguredModelAlgorithmAssociation(_ input: CreateConfiguredModelAlgorithmAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateConfiguredModelAlgorithmAssociationResponse {
        return try await self.client.execute(
            operation: "CreateConfiguredModelAlgorithmAssociation",
            path: "/memberships/{membershipIdentifier}/configured-model-algorithm-associations",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Associates a configured model algorithm to a collaboration for use by any member of the collaboration.
    ///
    /// - Parameters:
    ///   - configuredModelAlgorithmArn: The Amazon Resource Name (ARN) of the configured model algorithm that you want to associate.
    ///   - description: The description of the configured model algorithm association.
    ///   - membershipIdentifier: The membership ID of the member who is associating this configured model algorithm.
    ///   - name: The name of the configured model algorithm association.
    ///   - privacyConfiguration: The privacy configuration information for the configured model algorithm association, including the maximum data size that can be exported.
    ///   - tags: Optional metadata tags (maximum 50 per resource; keys up to 128 and values up to 256 UTF-8 characters; keys and values are case sensitive; the aws: prefix is reserved for AWS use).
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createConfiguredModelAlgorithmAssociation(
        configuredModelAlgorithmArn: String,
        description: String? = nil,
        membershipIdentifier: String,
        name: String,
        privacyConfiguration: PrivacyConfiguration? = nil,
        tags: [String: String]? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateConfiguredModelAlgorithmAssociationResponse {
        // Build the request inline and delegate to the request-based overload.
        try await self.createConfiguredModelAlgorithmAssociation(
            CreateConfiguredModelAlgorithmAssociationRequest(
                configuredModelAlgorithmArn: configuredModelAlgorithmArn,
                description: description,
                membershipIdentifier: membershipIdentifier,
                name: name,
                privacyConfiguration: privacyConfiguration,
                tags: tags
            ),
            logger: logger
        )
    }

    /// Provides the information to create an ML input channel. An ML input channel is the result of a query that can be used for ML modeling.
    ///
    /// - Parameters:
    ///   - input: Request describing the ML input channel to create.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createMLInputChannel(_ input: CreateMLInputChannelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMLInputChannelResponse {
        return try await self.client.execute(
            operation: "CreateMLInputChannel",
            path: "/memberships/{membershipIdentifier}/ml-input-channels",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Provides the information to create an ML input channel. An ML input channel is the result of a query that can be used for ML modeling.
    ///
    /// - Parameters:
    ///   - configuredModelAlgorithmAssociations: The associated configured model algorithms that are necessary to create this ML input channel.
    ///   - description: The description of the ML input channel.
    ///   - inputChannel: The input data that is used to create this ML input channel.
    ///   - kmsKeyArn: The Amazon Resource Name (ARN) of the KMS key that is used to access the input channel.
    ///   - membershipIdentifier: The membership ID of the member that is creating the ML input channel.
    ///   - name: The name of the ML input channel.
    ///   - retentionInDays: The number of days that the data in the ML input channel is retained.
    ///   - tags: Optional metadata tags (maximum 50 per resource; keys up to 128 and values up to 256 UTF-8 characters; keys and values are case sensitive; the aws: prefix is reserved for AWS use).
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createMLInputChannel(
        configuredModelAlgorithmAssociations: [String],
        description: String? = nil,
        inputChannel: InputChannel,
        kmsKeyArn: String? = nil,
        membershipIdentifier: String,
        name: String,
        retentionInDays: Int,
        tags: [String: String]? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateMLInputChannelResponse {
        // Build the request inline and delegate to the request-based overload.
        try await self.createMLInputChannel(
            CreateMLInputChannelRequest(
                configuredModelAlgorithmAssociations: configuredModelAlgorithmAssociations,
                description: description,
                inputChannel: inputChannel,
                kmsKeyArn: kmsKeyArn,
                membershipIdentifier: membershipIdentifier,
                name: name,
                retentionInDays: retentionInDays,
                tags: tags
            ),
            logger: logger
        )
    }

    /// Creates a trained model from an associated configured model algorithm using data from any member of the collaboration.
    ///
    /// - Parameters:
    ///   - input: Request describing the trained model to create.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createTrainedModel(_ input: CreateTrainedModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTrainedModelResponse {
        return try await self.client.execute(
            operation: "CreateTrainedModel",
            path: "/memberships/{membershipIdentifier}/trained-models",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Creates a trained model from an associated configured model algorithm using data from any member of the collaboration.
    ///
    /// - Parameters:
    ///   - configuredModelAlgorithmAssociationArn: The associated configured model algorithm used to train this model.
    ///   - dataChannels: The data channels that are used as input for the trained model request. Limit: maximum of 20 channels total (including both dataChannels and incrementalTrainingDataChannels).
    ///   - description: The description of the trained model.
    ///   - environment: The environment variables to set in the Docker container.
    ///   - hyperparameters: Algorithm-specific parameters that influence the quality of the model, set before the learning process starts.
    ///   - incrementalTrainingDataChannels: The incremental training data channels for the trained model. Incremental training creates a new trained model with updates without retraining from scratch; you can specify up to one channel that references a previously trained model and its version. Limit: maximum of 20 channels total (including both incrementalTrainingDataChannels and dataChannels).
    ///   - kmsKeyArn: The Amazon Resource Name (ARN) of the KMS key used to encrypt and decrypt customer-owned data in the trained ML model and the associated data.
    ///   - membershipIdentifier: The membership ID of the member that is creating the trained model.
    ///   - name: The name of the trained model.
    ///   - resourceConfig: Information about the EC2 resources that are used to train this model.
    ///   - stoppingCondition: The criteria that is used to stop model training.
    ///   - tags: Optional metadata tags (maximum 50 per resource; keys up to 128 and values up to 256 UTF-8 characters; keys and values are case sensitive; the aws: prefix is reserved for AWS use).
    ///   - trainingInputMode: How the training data is made available to the training algorithm: File (downloaded to the training instance as files), FastFile (streamed directly from Amazon S3), or Pipe (streamed via named pipes).
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createTrainedModel(
        configuredModelAlgorithmAssociationArn: String,
        dataChannels: [ModelTrainingDataChannel],
        description: String? = nil,
        environment: [String: String]? = nil,
        hyperparameters: [String: String]? = nil,
        incrementalTrainingDataChannels: [IncrementalTrainingDataChannel]? = nil,
        kmsKeyArn: String? = nil,
        membershipIdentifier: String,
        name: String,
        resourceConfig: ResourceConfig,
        stoppingCondition: StoppingCondition? = nil,
        tags: [String: String]? = nil,
        trainingInputMode: TrainingInputMode? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateTrainedModelResponse {
        // Build the request inline and delegate to the request-based overload.
        try await self.createTrainedModel(
            CreateTrainedModelRequest(
                configuredModelAlgorithmAssociationArn: configuredModelAlgorithmAssociationArn,
                dataChannels: dataChannels,
                description: description,
                environment: environment,
                hyperparameters: hyperparameters,
                incrementalTrainingDataChannels: incrementalTrainingDataChannels,
                kmsKeyArn: kmsKeyArn,
                membershipIdentifier: membershipIdentifier,
                name: name,
                resourceConfig: resourceConfig,
                stoppingCondition: stoppingCondition,
                tags: tags,
                trainingInputMode: trainingInputMode
            ),
            logger: logger
        )
    }

    /// Defines the information necessary to create a training dataset. In Clean Rooms ML, the TrainingDataset is metadata that points to a Glue table, which is read only during AudienceModel creation.
    @Sendable
    @inlinable
    public func createTrainingDataset(_ input: CreateTrainingDatasetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTrainingDatasetResponse {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "CreateTrainingDataset", path: "/training-dataset",
            httpMethod: .POST, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Defines the information necessary to create a training dataset. In Clean Rooms ML, the TrainingDataset is metadata that points to a Glue table, which is read only during AudienceModel creation.
    ///
    /// Parameters:
    ///   - description: The description of the training dataset.
    ///   - name: The name of the training dataset. This name must be unique in your account and region.
    ///   - roleArn: The ARN of the IAM role that Clean Rooms ML can assume to read the data referred to in the dataSource field of each dataset. Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an AccessDeniedException error.
    ///   - tags: The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. The following basic restrictions apply to tags:   Maximum number of tags per resource - 50.   For each resource, each tag key must be unique, and each tag key can have only one value.   Maximum key length - 128 Unicode characters in UTF-8.   Maximum value length - 256 Unicode characters in UTF-8.   If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.   Tag keys and values are case sensitive.   Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit.
    ///   - trainingData: An array of information that lists the Dataset objects, which specifies the dataset type and details on its location and schema. You must provide a role that has read access to these tables.
    ///   - logger: Logger used during operation
    @inlinable
    public func createTrainingDataset(
        description: String? = nil,
        name: String,
        roleArn: String,
        tags: [String: String]? = nil,
        trainingData: [Dataset],
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateTrainingDatasetResponse {
        // Wrap the fields in a request value and delegate to the request-based overload.
        return try await self.createTrainingDataset(
            CreateTrainingDatasetRequest(
                description: description,
                name: name,
                roleArn: roleArn,
                tags: tags,
                trainingData: trainingData
            ),
            logger: logger
        )
    }

    /// Deletes the specified audience generation job, and removes all data associated with the job.
    @Sendable
    @inlinable
    public func deleteAudienceGenerationJob(_ input: DeleteAudienceGenerationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "DeleteAudienceGenerationJob", path: "/audience-generation-job/{audienceGenerationJobArn}",
            httpMethod: .DELETE, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Deletes the specified audience generation job, and removes all data associated with the job.
    ///
    /// Parameters:
    ///   - audienceGenerationJobArn: The Amazon Resource Name (ARN) of the audience generation job that you want to delete.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteAudienceGenerationJob(
        audienceGenerationJobArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Wrap the ARN in a request value and delegate to the request-based overload.
        try await self.deleteAudienceGenerationJob(
            DeleteAudienceGenerationJobRequest(audienceGenerationJobArn: audienceGenerationJobArn),
            logger: logger
        )
    }

    /// Specifies an audience model that you want to delete. You can't delete an audience model if there are any configured audience models that depend on the audience model.
    @Sendable
    @inlinable
    public func deleteAudienceModel(_ input: DeleteAudienceModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "DeleteAudienceModel", path: "/audience-model/{audienceModelArn}",
            httpMethod: .DELETE, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Specifies an audience model that you want to delete. You can't delete an audience model if there are any configured audience models that depend on the audience model.
    ///
    /// Parameters:
    ///   - audienceModelArn: The Amazon Resource Name (ARN) of the audience model that you want to delete.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteAudienceModel(
        audienceModelArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Wrap the ARN in a request value and delegate to the request-based overload.
        try await self.deleteAudienceModel(
            DeleteAudienceModelRequest(audienceModelArn: audienceModelArn),
            logger: logger
        )
    }

    /// Deletes the specified configured audience model. You can't delete a configured audience model if there are any lookalike models that use the configured audience model. If you delete a configured audience model, it will be removed from any collaborations that it is associated to.
    @Sendable
    @inlinable
    public func deleteConfiguredAudienceModel(_ input: DeleteConfiguredAudienceModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "DeleteConfiguredAudienceModel", path: "/configured-audience-model/{configuredAudienceModelArn}",
            httpMethod: .DELETE, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Deletes the specified configured audience model. You can't delete a configured audience model if there are any lookalike models that use the configured audience model. If you delete a configured audience model, it will be removed from any collaborations that it is associated to.
    ///
    /// Parameters:
    ///   - configuredAudienceModelArn: The Amazon Resource Name (ARN) of the configured audience model that you want to delete.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteConfiguredAudienceModel(
        configuredAudienceModelArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Wrap the ARN in a request value and delegate to the request-based overload.
        try await self.deleteConfiguredAudienceModel(
            DeleteConfiguredAudienceModelRequest(configuredAudienceModelArn: configuredAudienceModelArn),
            logger: logger
        )
    }

    /// Deletes the specified configured audience model policy.
    @Sendable
    @inlinable
    public func deleteConfiguredAudienceModelPolicy(_ input: DeleteConfiguredAudienceModelPolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "DeleteConfiguredAudienceModelPolicy", path: "/configured-audience-model/{configuredAudienceModelArn}/policy",
            httpMethod: .DELETE, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Deletes the specified configured audience model policy.
    ///
    /// Parameters:
    ///   - configuredAudienceModelArn: The Amazon Resource Name (ARN) of the configured audience model policy that you want to delete.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteConfiguredAudienceModelPolicy(
        configuredAudienceModelArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Wrap the ARN in a request value and delegate to the request-based overload.
        try await self.deleteConfiguredAudienceModelPolicy(
            DeleteConfiguredAudienceModelPolicyRequest(configuredAudienceModelArn: configuredAudienceModelArn),
            logger: logger
        )
    }

    /// Deletes a configured model algorithm.
    @Sendable
    @inlinable
    public func deleteConfiguredModelAlgorithm(_ input: DeleteConfiguredModelAlgorithmRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "DeleteConfiguredModelAlgorithm", path: "/configured-model-algorithms/{configuredModelAlgorithmArn}",
            httpMethod: .DELETE, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Deletes a configured model algorithm.
    ///
    /// Parameters:
    ///   - configuredModelAlgorithmArn: The Amazon Resource Name (ARN) of the configured model algorithm that you want to delete.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteConfiguredModelAlgorithm(
        configuredModelAlgorithmArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Wrap the ARN in a request value and delegate to the request-based overload.
        try await self.deleteConfiguredModelAlgorithm(
            DeleteConfiguredModelAlgorithmRequest(configuredModelAlgorithmArn: configuredModelAlgorithmArn),
            logger: logger
        )
    }

    /// Deletes a configured model algorithm association.
    @Sendable
    @inlinable
    public func deleteConfiguredModelAlgorithmAssociation(_ input: DeleteConfiguredModelAlgorithmAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "DeleteConfiguredModelAlgorithmAssociation", path: "/memberships/{membershipIdentifier}/configured-model-algorithm-associations/{configuredModelAlgorithmAssociationArn}",
            httpMethod: .DELETE, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Deletes a configured model algorithm association.
    ///
    /// Parameters:
    ///   - configuredModelAlgorithmAssociationArn: The Amazon Resource Name (ARN) of the configured model algorithm association that you want to delete.
    ///   - membershipIdentifier: The membership ID of the member that is deleting the configured model algorithm association.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteConfiguredModelAlgorithmAssociation(
        configuredModelAlgorithmAssociationArn: String,
        membershipIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Wrap the fields in a request value and delegate to the request-based overload.
        try await self.deleteConfiguredModelAlgorithmAssociation(
            DeleteConfiguredModelAlgorithmAssociationRequest(
                configuredModelAlgorithmAssociationArn: configuredModelAlgorithmAssociationArn,
                membershipIdentifier: membershipIdentifier
            ),
            logger: logger
        )
    }

    /// Deletes an ML modeling configuration.
    @Sendable
    @inlinable
    public func deleteMLConfiguration(_ input: DeleteMLConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "DeleteMLConfiguration", path: "/memberships/{membershipIdentifier}/ml-configurations",
            httpMethod: .DELETE, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Deletes an ML modeling configuration.
    ///
    /// Parameters:
    ///   - membershipIdentifier: The membership ID of the member that is deleting the ML modeling configuration.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteMLConfiguration(
        membershipIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Wrap the membership ID in a request value and delegate to the request-based overload.
        try await self.deleteMLConfiguration(
            DeleteMLConfigurationRequest(membershipIdentifier: membershipIdentifier),
            logger: logger
        )
    }

    /// Provides the information necessary to delete an ML input channel.
    @Sendable
    @inlinable
    public func deleteMLInputChannelData(_ input: DeleteMLInputChannelDataRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "DeleteMLInputChannelData", path: "/memberships/{membershipIdentifier}/ml-input-channels/{mlInputChannelArn}",
            httpMethod: .DELETE, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Provides the information necessary to delete an ML input channel.
    ///
    /// Parameters:
    ///   - membershipIdentifier: The membership ID of the membership that contains the ML input channel you want to delete.
    ///   - mlInputChannelArn: The Amazon Resource Name (ARN) of the ML input channel that you want to delete.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteMLInputChannelData(
        membershipIdentifier: String,
        mlInputChannelArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Wrap the fields in a request value and delegate to the request-based overload.
        try await self.deleteMLInputChannelData(
            DeleteMLInputChannelDataRequest(
                membershipIdentifier: membershipIdentifier,
                mlInputChannelArn: mlInputChannelArn
            ),
            logger: logger
        )
    }

    /// Deletes the model artifacts stored by the service.
    @Sendable
    @inlinable
    public func deleteTrainedModelOutput(_ input: DeleteTrainedModelOutputRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "DeleteTrainedModelOutput", path: "/memberships/{membershipIdentifier}/trained-models/{trainedModelArn}",
            httpMethod: .DELETE, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Deletes the model artifacts stored by the service.
    ///
    /// Parameters:
    ///   - membershipIdentifier: The membership ID of the member that is deleting the trained model output.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model whose output you want to delete.
    ///   - versionIdentifier: The version identifier of the trained model to delete. If not specified, the operation will delete the base version of the trained model. When specified, only the particular version will be deleted.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteTrainedModelOutput(
        membershipIdentifier: String,
        trainedModelArn: String,
        versionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Wrap the fields in a request value and delegate to the request-based overload.
        try await self.deleteTrainedModelOutput(
            DeleteTrainedModelOutputRequest(
                membershipIdentifier: membershipIdentifier,
                trainedModelArn: trainedModelArn,
                versionIdentifier: versionIdentifier
            ),
            logger: logger
        )
    }

    /// Specifies a training dataset that you want to delete. You can't delete a training dataset if there are any audience models that depend on the training dataset. In Clean Rooms ML, the TrainingDataset is metadata that points to a Glue table, which is read only during AudienceModel creation. This action deletes the metadata.
    @Sendable
    @inlinable
    public func deleteTrainingDataset(_ input: DeleteTrainingDatasetRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "DeleteTrainingDataset", path: "/training-dataset/{trainingDatasetArn}",
            httpMethod: .DELETE, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Specifies a training dataset that you want to delete. You can't delete a training dataset if there are any audience models that depend on the training dataset. In Clean Rooms ML, the TrainingDataset is metadata that points to a Glue table, which is read only during AudienceModel creation. This action deletes the metadata.
    ///
    /// Parameters:
    ///   - trainingDatasetArn: The Amazon Resource Name (ARN) of the training dataset that you want to delete.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteTrainingDataset(
        trainingDatasetArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Wrap the ARN in a request value and delegate to the request-based overload.
        try await self.deleteTrainingDataset(
            DeleteTrainingDatasetRequest(trainingDatasetArn: trainingDatasetArn),
            logger: logger
        )
    }

    /// Returns information about an audience generation job.
    @Sendable
    @inlinable
    public func getAudienceGenerationJob(_ input: GetAudienceGenerationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAudienceGenerationJobResponse {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "GetAudienceGenerationJob", path: "/audience-generation-job/{audienceGenerationJobArn}",
            httpMethod: .GET, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Returns information about an audience generation job.
    ///
    /// Parameters:
    ///   - audienceGenerationJobArn: The Amazon Resource Name (ARN) of the audience generation job that you are interested in.
    ///   - logger: Logger used during operation
    @inlinable
    public func getAudienceGenerationJob(
        audienceGenerationJobArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetAudienceGenerationJobResponse {
        // Wrap the ARN in a request value and delegate to the request-based overload.
        return try await self.getAudienceGenerationJob(
            GetAudienceGenerationJobRequest(audienceGenerationJobArn: audienceGenerationJobArn),
            logger: logger
        )
    }

    /// Returns information about an audience model
    @Sendable
    @inlinable
    public func getAudienceModel(_ input: GetAudienceModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAudienceModelResponse {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "GetAudienceModel", path: "/audience-model/{audienceModelArn}",
            httpMethod: .GET, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Returns information about an audience model
    ///
    /// Parameters:
    ///   - audienceModelArn: The Amazon Resource Name (ARN) of the audience model that you are interested in.
    ///   - logger: Logger used during operation
    @inlinable
    public func getAudienceModel(
        audienceModelArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetAudienceModelResponse {
        // Wrap the ARN in a request value and delegate to the request-based overload.
        return try await self.getAudienceModel(
            GetAudienceModelRequest(audienceModelArn: audienceModelArn),
            logger: logger
        )
    }

    /// Returns information about the configured model algorithm association in a collaboration.
    @Sendable
    @inlinable
    public func getCollaborationConfiguredModelAlgorithmAssociation(_ input: GetCollaborationConfiguredModelAlgorithmAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetCollaborationConfiguredModelAlgorithmAssociationResponse {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "GetCollaborationConfiguredModelAlgorithmAssociation", path: "/collaborations/{collaborationIdentifier}/configured-model-algorithm-associations/{configuredModelAlgorithmAssociationArn}",
            httpMethod: .GET, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Returns information about the configured model algorithm association in a collaboration.
    ///
    /// Parameters:
    ///   - collaborationIdentifier: The collaboration ID for the collaboration that contains the configured model algorithm association that you want to return information about.
    ///   - configuredModelAlgorithmAssociationArn: The Amazon Resource Name (ARN) of the configured model algorithm association that you want to return information about.
    ///   - logger: Logger used during operation
    @inlinable
    public func getCollaborationConfiguredModelAlgorithmAssociation(
        collaborationIdentifier: String,
        configuredModelAlgorithmAssociationArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetCollaborationConfiguredModelAlgorithmAssociationResponse {
        // Wrap the fields in a request value and delegate to the request-based overload.
        return try await self.getCollaborationConfiguredModelAlgorithmAssociation(
            GetCollaborationConfiguredModelAlgorithmAssociationRequest(
                collaborationIdentifier: collaborationIdentifier,
                configuredModelAlgorithmAssociationArn: configuredModelAlgorithmAssociationArn
            ),
            logger: logger
        )
    }

    /// Returns information about a specific ML input channel in a collaboration.
    @Sendable
    @inlinable
    public func getCollaborationMLInputChannel(_ input: GetCollaborationMLInputChannelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetCollaborationMLInputChannelResponse {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "GetCollaborationMLInputChannel", path: "/collaborations/{collaborationIdentifier}/ml-input-channels/{mlInputChannelArn}",
            httpMethod: .GET, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Returns information about a specific ML input channel in a collaboration.
    ///
    /// Parameters:
    ///   - collaborationIdentifier: The collaboration ID of the collaboration that contains the ML input channel that you want to get.
    ///   - mlInputChannelArn: The Amazon Resource Name (ARN) of the ML input channel that you want to get.
    ///   - logger: Logger used during operation
    @inlinable
    public func getCollaborationMLInputChannel(
        collaborationIdentifier: String,
        mlInputChannelArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetCollaborationMLInputChannelResponse {
        // Wrap the fields in a request value and delegate to the request-based overload.
        return try await self.getCollaborationMLInputChannel(
            GetCollaborationMLInputChannelRequest(
                collaborationIdentifier: collaborationIdentifier,
                mlInputChannelArn: mlInputChannelArn
            ),
            logger: logger
        )
    }

    /// Returns information about a trained model in a collaboration.
    @Sendable
    @inlinable
    public func getCollaborationTrainedModel(_ input: GetCollaborationTrainedModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetCollaborationTrainedModelResponse {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "GetCollaborationTrainedModel", path: "/collaborations/{collaborationIdentifier}/trained-models/{trainedModelArn}",
            httpMethod: .GET, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Returns information about a trained model in a collaboration.
    ///
    /// Parameters:
    ///   - collaborationIdentifier: The collaboration ID that contains the trained model that you want to return information about.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model that you want to return information about.
    ///   - versionIdentifier: The version identifier of the trained model to retrieve. If not specified, the operation returns information about the latest version of the trained model.
    ///   - logger: Logger used during operation
    @inlinable
    public func getCollaborationTrainedModel(
        collaborationIdentifier: String,
        trainedModelArn: String,
        versionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetCollaborationTrainedModelResponse {
        // Wrap the fields in a request value and delegate to the request-based overload.
        return try await self.getCollaborationTrainedModel(
            GetCollaborationTrainedModelRequest(
                collaborationIdentifier: collaborationIdentifier,
                trainedModelArn: trainedModelArn,
                versionIdentifier: versionIdentifier
            ),
            logger: logger
        )
    }

    /// Returns information about a specified configured audience model.
    @Sendable
    @inlinable
    public func getConfiguredAudienceModel(_ input: GetConfiguredAudienceModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetConfiguredAudienceModelResponse {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "GetConfiguredAudienceModel", path: "/configured-audience-model/{configuredAudienceModelArn}",
            httpMethod: .GET, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Returns information about a specified configured audience model.
    ///
    /// Parameters:
    ///   - configuredAudienceModelArn: The Amazon Resource Name (ARN) of the configured audience model that you are interested in.
    ///   - logger: Logger used during operation
    @inlinable
    public func getConfiguredAudienceModel(
        configuredAudienceModelArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetConfiguredAudienceModelResponse {
        // Wrap the ARN in a request value and delegate to the request-based overload.
        return try await self.getConfiguredAudienceModel(
            GetConfiguredAudienceModelRequest(configuredAudienceModelArn: configuredAudienceModelArn),
            logger: logger
        )
    }

    /// Returns information about a configured audience model policy.
    @Sendable
    @inlinable
    public func getConfiguredAudienceModelPolicy(_ input: GetConfiguredAudienceModelPolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetConfiguredAudienceModelPolicyResponse {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "GetConfiguredAudienceModelPolicy", path: "/configured-audience-model/{configuredAudienceModelArn}/policy",
            httpMethod: .GET, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Returns information about a configured audience model policy.
    ///
    /// Parameters:
    ///   - configuredAudienceModelArn: The Amazon Resource Name (ARN) of the configured audience model that you are interested in.
    ///   - logger: Logger used during operation
    @inlinable
    public func getConfiguredAudienceModelPolicy(
        configuredAudienceModelArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetConfiguredAudienceModelPolicyResponse {
        // Wrap the ARN in a request value and delegate to the request-based overload.
        return try await self.getConfiguredAudienceModelPolicy(
            GetConfiguredAudienceModelPolicyRequest(configuredAudienceModelArn: configuredAudienceModelArn),
            logger: logger
        )
    }

    /// Returns information about a configured model algorithm.
    @Sendable
    @inlinable
    public func getConfiguredModelAlgorithm(_ input: GetConfiguredModelAlgorithmRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetConfiguredModelAlgorithmResponse {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "GetConfiguredModelAlgorithm", path: "/configured-model-algorithms/{configuredModelAlgorithmArn}",
            httpMethod: .GET, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Returns information about a configured model algorithm.
    ///
    /// Parameters:
    ///   - configuredModelAlgorithmArn: The Amazon Resource Name (ARN) of the configured model algorithm that you want to return information about.
    ///   - logger: Logger used during operation
    @inlinable
    public func getConfiguredModelAlgorithm(
        configuredModelAlgorithmArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetConfiguredModelAlgorithmResponse {
        // Wrap the ARN in a request value and delegate to the request-based overload.
        return try await self.getConfiguredModelAlgorithm(
            GetConfiguredModelAlgorithmRequest(configuredModelAlgorithmArn: configuredModelAlgorithmArn),
            logger: logger
        )
    }

    /// Returns information about a configured model algorithm association.
    @Sendable
    @inlinable
    public func getConfiguredModelAlgorithmAssociation(_ input: GetConfiguredModelAlgorithmAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetConfiguredModelAlgorithmAssociationResponse {
        // Forward to the generic execute entry point with this operation's route details.
        try await self.client.execute(
            operation: "GetConfiguredModelAlgorithmAssociation", path: "/memberships/{membershipIdentifier}/configured-model-algorithm-associations/{configuredModelAlgorithmAssociationArn}",
            httpMethod: .GET, serviceConfig: self.config,
            input: input, logger: logger
        )
    }
    /// Returns information about a configured model algorithm association.
    ///
    /// Parameters:
    ///   - configuredModelAlgorithmAssociationArn: The Amazon Resource Name (ARN) of the configured model algorithm association that you want to return information about.
    ///   - membershipIdentifier: The membership ID of the member that created the configured model algorithm association.
    ///   - logger: Logger used during operation
    @inlinable
    public func getConfiguredModelAlgorithmAssociation(
        configuredModelAlgorithmAssociationArn: String,
        membershipIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetConfiguredModelAlgorithmAssociationResponse {
        // Wrap the fields in a request value and delegate to the request-based overload.
        return try await self.getConfiguredModelAlgorithmAssociation(
            GetConfiguredModelAlgorithmAssociationRequest(
                configuredModelAlgorithmAssociationArn: configuredModelAlgorithmAssociationArn,
                membershipIdentifier: membershipIdentifier
            ),
            logger: logger
        )
    }

    /// Returns information about a specific ML configuration.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getMLConfiguration(_ input: GetMLConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetMLConfigurationResponse {
        return try await self.client.execute(
            operation: "GetMLConfiguration",
            path: "/memberships/{membershipIdentifier}/ml-configurations",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns information about a specific ML configuration.
    ///
    /// - Parameters:
    ///   - membershipIdentifier: The membership ID of the member that owns the ML configuration you want to return information about.
    ///   - logger: Logger used during operation
    @inlinable
    public func getMLConfiguration(
        membershipIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetMLConfigurationResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.getMLConfiguration(
            GetMLConfigurationRequest(membershipIdentifier: membershipIdentifier),
            logger: logger
        )
    }

    /// Returns information about an ML input channel.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getMLInputChannel(_ input: GetMLInputChannelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetMLInputChannelResponse {
        return try await self.client.execute(
            operation: "GetMLInputChannel",
            path: "/memberships/{membershipIdentifier}/ml-input-channels/{mlInputChannelArn}",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns information about an ML input channel.
    ///
    /// - Parameters:
    ///   - membershipIdentifier: The membership ID of the membership that contains the ML input channel that you want to get.
    ///   - mlInputChannelArn: The Amazon Resource Name (ARN) of the ML input channel that you want to get.
    ///   - logger: Logger used during operation
    @inlinable
    public func getMLInputChannel(
        membershipIdentifier: String,
        mlInputChannelArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetMLInputChannelResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.getMLInputChannel(
            GetMLInputChannelRequest(
                membershipIdentifier: membershipIdentifier,
                mlInputChannelArn: mlInputChannelArn
            ),
            logger: logger
        )
    }

    /// Returns information about a trained model.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getTrainedModel(_ input: GetTrainedModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTrainedModelResponse {
        return try await self.client.execute(
            operation: "GetTrainedModel",
            path: "/memberships/{membershipIdentifier}/trained-models/{trainedModelArn}",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns information about a trained model.
    ///
    /// - Parameters:
    ///   - membershipIdentifier: The membership ID of the member that created the trained model that you are interested in.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model that you are interested in.
    ///   - versionIdentifier: The version identifier of the trained model to retrieve. If not specified, the operation returns information about the latest version of the trained model.
    ///   - logger: Logger used during operation
    @inlinable
    public func getTrainedModel(
        membershipIdentifier: String,
        trainedModelArn: String,
        versionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetTrainedModelResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.getTrainedModel(
            GetTrainedModelRequest(
                membershipIdentifier: membershipIdentifier,
                trainedModelArn: trainedModelArn,
                versionIdentifier: versionIdentifier
            ),
            logger: logger
        )
    }

    /// Returns information about a trained model inference job.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getTrainedModelInferenceJob(_ input: GetTrainedModelInferenceJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTrainedModelInferenceJobResponse {
        return try await self.client.execute(
            operation: "GetTrainedModelInferenceJob",
            path: "/memberships/{membershipIdentifier}/trained-model-inference-jobs/{trainedModelInferenceJobArn}",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns information about a trained model inference job.
    ///
    /// - Parameters:
    ///   - membershipIdentifier: Provides the membership ID of the membership that contains the trained model inference job that you are interested in.
    ///   - trainedModelInferenceJobArn: Provides the Amazon Resource Name (ARN) of the trained model inference job that you are interested in.
    ///   - logger: Logger used during operation
    @inlinable
    public func getTrainedModelInferenceJob(
        membershipIdentifier: String,
        trainedModelInferenceJobArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetTrainedModelInferenceJobResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.getTrainedModelInferenceJob(
            GetTrainedModelInferenceJobRequest(
                membershipIdentifier: membershipIdentifier,
                trainedModelInferenceJobArn: trainedModelInferenceJobArn
            ),
            logger: logger
        )
    }

    /// Returns information about a training dataset.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getTrainingDataset(_ input: GetTrainingDatasetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTrainingDatasetResponse {
        return try await self.client.execute(
            operation: "GetTrainingDataset",
            path: "/training-dataset/{trainingDatasetArn}",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns information about a training dataset.
    ///
    /// - Parameters:
    ///   - trainingDatasetArn: The Amazon Resource Name (ARN) of the training dataset that you are interested in.
    ///   - logger: Logger used during operation
    @inlinable
    public func getTrainingDataset(
        trainingDatasetArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetTrainingDatasetResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.getTrainingDataset(
            GetTrainingDatasetRequest(trainingDatasetArn: trainingDatasetArn),
            logger: logger
        )
    }

    /// Returns a list of the audience export jobs.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listAudienceExportJobs(_ input: ListAudienceExportJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAudienceExportJobsResponse {
        return try await self.client.execute(
            operation: "ListAudienceExportJobs",
            path: "/audience-export-job",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of the audience export jobs.
    ///
    /// - Parameters:
    ///   - audienceGenerationJobArn: The Amazon Resource Name (ARN) of the audience generation job that you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listAudienceExportJobs(
        audienceGenerationJobArn: String? = nil,
        maxResults: Int? = nil,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListAudienceExportJobsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listAudienceExportJobs(
            ListAudienceExportJobsRequest(
                audienceGenerationJobArn: audienceGenerationJobArn,
                maxResults: maxResults,
                nextToken: nextToken
            ),
            logger: logger
        )
    }

    /// Returns a list of audience generation jobs.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listAudienceGenerationJobs(_ input: ListAudienceGenerationJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAudienceGenerationJobsResponse {
        return try await self.client.execute(
            operation: "ListAudienceGenerationJobs",
            path: "/audience-generation-job",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of audience generation jobs.
    ///
    /// - Parameters:
    ///   - collaborationId: The identifier of the collaboration that contains the audience generation jobs that you are interested in.
    ///   - configuredAudienceModelArn: The Amazon Resource Name (ARN) of the configured audience model that was used for the audience generation jobs that you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listAudienceGenerationJobs(
        collaborationId: String? = nil,
        configuredAudienceModelArn: String? = nil,
        maxResults: Int? = nil,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListAudienceGenerationJobsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listAudienceGenerationJobs(
            ListAudienceGenerationJobsRequest(
                collaborationId: collaborationId,
                configuredAudienceModelArn: configuredAudienceModelArn,
                maxResults: maxResults,
                nextToken: nextToken
            ),
            logger: logger
        )
    }

    /// Returns a list of audience models.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listAudienceModels(_ input: ListAudienceModelsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAudienceModelsResponse {
        return try await self.client.execute(
            operation: "ListAudienceModels",
            path: "/audience-model",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of audience models.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listAudienceModels(
        maxResults: Int? = nil,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListAudienceModelsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listAudienceModels(
            ListAudienceModelsRequest(maxResults: maxResults, nextToken: nextToken),
            logger: logger
        )
    }

    /// Returns a list of the configured model algorithm associations in a collaboration.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listCollaborationConfiguredModelAlgorithmAssociations(_ input: ListCollaborationConfiguredModelAlgorithmAssociationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCollaborationConfiguredModelAlgorithmAssociationsResponse {
        return try await self.client.execute(
            operation: "ListCollaborationConfiguredModelAlgorithmAssociations",
            path: "/collaborations/{collaborationIdentifier}/configured-model-algorithm-associations",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of the configured model algorithm associations in a collaboration.
    ///
    /// - Parameters:
    ///   - collaborationIdentifier: The collaboration ID of the collaboration that contains the configured model algorithm associations that you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listCollaborationConfiguredModelAlgorithmAssociations(
        collaborationIdentifier: String,
        maxResults: Int? = nil,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListCollaborationConfiguredModelAlgorithmAssociationsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listCollaborationConfiguredModelAlgorithmAssociations(
            ListCollaborationConfiguredModelAlgorithmAssociationsRequest(
                collaborationIdentifier: collaborationIdentifier,
                maxResults: maxResults,
                nextToken: nextToken
            ),
            logger: logger
        )
    }

    /// Returns a list of the ML input channels in a collaboration.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listCollaborationMLInputChannels(_ input: ListCollaborationMLInputChannelsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCollaborationMLInputChannelsResponse {
        return try await self.client.execute(
            operation: "ListCollaborationMLInputChannels",
            path: "/collaborations/{collaborationIdentifier}/ml-input-channels",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of the ML input channels in a collaboration.
    ///
    /// - Parameters:
    ///   - collaborationIdentifier: The collaboration ID of the collaboration that contains the ML input channels that you want to list.
    ///   - maxResults: The maximum number of results to return.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listCollaborationMLInputChannels(
        collaborationIdentifier: String,
        maxResults: Int? = nil,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListCollaborationMLInputChannelsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listCollaborationMLInputChannels(
            ListCollaborationMLInputChannelsRequest(
                collaborationIdentifier: collaborationIdentifier,
                maxResults: maxResults,
                nextToken: nextToken
            ),
            logger: logger
        )
    }

    /// Returns a list of the export jobs for a trained model in a collaboration.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listCollaborationTrainedModelExportJobs(_ input: ListCollaborationTrainedModelExportJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCollaborationTrainedModelExportJobsResponse {
        return try await self.client.execute(
            operation: "ListCollaborationTrainedModelExportJobs",
            path: "/collaborations/{collaborationIdentifier}/trained-models/{trainedModelArn}/export-jobs",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of the export jobs for a trained model in a collaboration.
    ///
    /// - Parameters:
    ///   - collaborationIdentifier: The collaboration ID of the collaboration that contains the trained model export jobs that you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model that was used to create the export jobs that you are interested in.
    ///   - trainedModelVersionIdentifier: The version identifier of the trained model to filter export jobs by. When specified, only export jobs for this specific version of the trained model are returned.
    ///   - logger: Logger used during operation
    @inlinable
    public func listCollaborationTrainedModelExportJobs(
        collaborationIdentifier: String,
        maxResults: Int? = nil,
        nextToken: String? = nil,
        trainedModelArn: String,
        trainedModelVersionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListCollaborationTrainedModelExportJobsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listCollaborationTrainedModelExportJobs(
            ListCollaborationTrainedModelExportJobsRequest(
                collaborationIdentifier: collaborationIdentifier,
                maxResults: maxResults,
                nextToken: nextToken,
                trainedModelArn: trainedModelArn,
                trainedModelVersionIdentifier: trainedModelVersionIdentifier
            ),
            logger: logger
        )
    }

    /// Returns a list of trained model inference jobs in a specified collaboration.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listCollaborationTrainedModelInferenceJobs(_ input: ListCollaborationTrainedModelInferenceJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCollaborationTrainedModelInferenceJobsResponse {
        return try await self.client.execute(
            operation: "ListCollaborationTrainedModelInferenceJobs",
            path: "/collaborations/{collaborationIdentifier}/trained-model-inference-jobs",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of trained model inference jobs in a specified collaboration.
    ///
    /// - Parameters:
    ///   - collaborationIdentifier: The collaboration ID of the collaboration that contains the trained model inference jobs that you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model that was used to create the trained model inference jobs that you are interested in.
    ///   - trainedModelVersionIdentifier: The version identifier of the trained model to filter inference jobs by. When specified, only inference jobs that used this specific version of the trained model are returned.
    ///   - logger: Logger used during operation
    @inlinable
    public func listCollaborationTrainedModelInferenceJobs(
        collaborationIdentifier: String,
        maxResults: Int? = nil,
        nextToken: String? = nil,
        trainedModelArn: String? = nil,
        trainedModelVersionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListCollaborationTrainedModelInferenceJobsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listCollaborationTrainedModelInferenceJobs(
            ListCollaborationTrainedModelInferenceJobsRequest(
                collaborationIdentifier: collaborationIdentifier,
                maxResults: maxResults,
                nextToken: nextToken,
                trainedModelArn: trainedModelArn,
                trainedModelVersionIdentifier: trainedModelVersionIdentifier
            ),
            logger: logger
        )
    }

    /// Returns a list of the trained models in a collaboration.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listCollaborationTrainedModels(_ input: ListCollaborationTrainedModelsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCollaborationTrainedModelsResponse {
        return try await self.client.execute(
            operation: "ListCollaborationTrainedModels",
            path: "/collaborations/{collaborationIdentifier}/trained-models",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of the trained models in a collaboration.
    ///
    /// - Parameters:
    ///   - collaborationIdentifier: The collaboration ID of the collaboration that contains the trained models you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listCollaborationTrainedModels(
        collaborationIdentifier: String,
        maxResults: Int? = nil,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListCollaborationTrainedModelsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listCollaborationTrainedModels(
            ListCollaborationTrainedModelsRequest(
                collaborationIdentifier: collaborationIdentifier,
                maxResults: maxResults,
                nextToken: nextToken
            ),
            logger: logger
        )
    }

    /// Returns a list of the configured audience models.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listConfiguredAudienceModels(_ input: ListConfiguredAudienceModelsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListConfiguredAudienceModelsResponse {
        return try await self.client.execute(
            operation: "ListConfiguredAudienceModels",
            path: "/configured-audience-model",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of the configured audience models.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listConfiguredAudienceModels(
        maxResults: Int? = nil,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListConfiguredAudienceModelsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listConfiguredAudienceModels(
            ListConfiguredAudienceModelsRequest(maxResults: maxResults, nextToken: nextToken),
            logger: logger
        )
    }

    /// Returns a list of configured model algorithm associations.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listConfiguredModelAlgorithmAssociations(_ input: ListConfiguredModelAlgorithmAssociationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListConfiguredModelAlgorithmAssociationsResponse {
        return try await self.client.execute(
            operation: "ListConfiguredModelAlgorithmAssociations",
            path: "/memberships/{membershipIdentifier}/configured-model-algorithm-associations",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of configured model algorithm associations.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - membershipIdentifier: The membership ID of the member that created the configured model algorithm associations you are interested in.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listConfiguredModelAlgorithmAssociations(
        maxResults: Int? = nil,
        membershipIdentifier: String,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListConfiguredModelAlgorithmAssociationsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listConfiguredModelAlgorithmAssociations(
            ListConfiguredModelAlgorithmAssociationsRequest(
                maxResults: maxResults,
                membershipIdentifier: membershipIdentifier,
                nextToken: nextToken
            ),
            logger: logger
        )
    }

    /// Returns a list of configured model algorithms.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listConfiguredModelAlgorithms(_ input: ListConfiguredModelAlgorithmsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListConfiguredModelAlgorithmsResponse {
        return try await self.client.execute(
            operation: "ListConfiguredModelAlgorithms",
            path: "/configured-model-algorithms",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of configured model algorithms.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listConfiguredModelAlgorithms(
        maxResults: Int? = nil,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListConfiguredModelAlgorithmsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listConfiguredModelAlgorithms(
            ListConfiguredModelAlgorithmsRequest(maxResults: maxResults, nextToken: nextToken),
            logger: logger
        )
    }

    /// Returns a list of ML input channels.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listMLInputChannels(_ input: ListMLInputChannelsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListMLInputChannelsResponse {
        return try await self.client.execute(
            operation: "ListMLInputChannels",
            path: "/memberships/{membershipIdentifier}/ml-input-channels",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of ML input channels.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum number of ML input channels to return.
    ///   - membershipIdentifier: The membership ID of the membership that contains the ML input channels that you want to list.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listMLInputChannels(
        maxResults: Int? = nil,
        membershipIdentifier: String,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListMLInputChannelsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listMLInputChannels(
            ListMLInputChannelsRequest(
                maxResults: maxResults,
                membershipIdentifier: membershipIdentifier,
                nextToken: nextToken
            ),
            logger: logger
        )
    }

    /// Returns a list of tags for a provided resource.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse {
        return try await self.client.execute(
            operation: "ListTagsForResource",
            path: "/tags/{resourceArn}",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of tags for a provided resource.
    ///
    /// - Parameters:
    ///   - resourceArn: The Amazon Resource Name (ARN) of the resource that you are interested in.
    ///   - logger: Logger used during operation
    @inlinable
    public func listTagsForResource(
        resourceArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListTagsForResourceResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listTagsForResource(
            ListTagsForResourceRequest(resourceArn: resourceArn),
            logger: logger
        )
    }

    /// Returns a list of trained model inference jobs that match the request parameters.
    ///
    /// - Parameters:
    ///   - input: The request parameters
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listTrainedModelInferenceJobs(_ input: ListTrainedModelInferenceJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTrainedModelInferenceJobsResponse {
        return try await self.client.execute(
            operation: "ListTrainedModelInferenceJobs",
            path: "/memberships/{membershipIdentifier}/trained-model-inference-jobs",
            httpMethod: .GET,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }

    /// Returns a list of trained model inference jobs that match the request parameters.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - membershipIdentifier: The membership identifier.
    ///   - nextToken: The token value retrieved from a previous call to access the next page of results.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of a trained model that was used to create the trained model inference jobs that you are interested in.
    ///   - trainedModelVersionIdentifier: The version identifier of the trained model to filter inference jobs by. When specified, only inference jobs that used this specific version of the trained model are returned.
    ///   - logger: Logger used during operation
    @inlinable
    public func listTrainedModelInferenceJobs(
        maxResults: Int? = nil,
        membershipIdentifier: String,
        nextToken: String? = nil,
        trainedModelArn: String? = nil,
        trainedModelVersionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListTrainedModelInferenceJobsResponse {
        // Build the request inline and delegate to the request-based overload.
        return try await self.listTrainedModelInferenceJobs(
            ListTrainedModelInferenceJobsRequest(
                maxResults: maxResults,
                membershipIdentifier: membershipIdentifier,
                nextToken: nextToken,
                trainedModelArn: trainedModelArn,
                trainedModelVersionIdentifier: trainedModelVersionIdentifier
            ),
            logger: logger
        )
    }

    /// Lists all versions of a specified trained model, including each version's status and creation details. Use this to track how a trained model has evolved and to select a specific version for inference or further training.
    @Sendable
    @inlinable
    public func listTrainedModelVersions(_ input: ListTrainedModelVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTrainedModelVersionsResponse {
        return try await self.client.execute(
            operation: "ListTrainedModelVersions", path: "/memberships/{membershipIdentifier}/trained-models/{trainedModelArn}/versions",
            httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Lists all versions of a specified trained model, including each version's status and creation details. Use this to track how a trained model has evolved and to select a specific version for inference or further training.
    ///
    /// Parameters:
    ///   - maxResults: Maximum number of trained model versions returned per page (default 10, maximum 100).
    ///   - membershipIdentifier: Membership identifier for the collaboration that contains the trained model.
    ///   - nextToken: Pagination token from a previous ListTrainedModelVersions call, used to fetch the next page.
    ///   - status: Restricts results to versions with the given status (valid values include CREATE_PENDING, CREATE_IN_PROGRESS, ACTIVE, CREATE_FAILED, and others).
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model whose versions are listed.
    ///   - logger: Logger used during operation
    @inlinable
    public func listTrainedModelVersions(
        maxResults: Int? = nil,
        membershipIdentifier: String,
        nextToken: String? = nil,
        status: TrainedModelStatus? = nil,
        trainedModelArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListTrainedModelVersionsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.listTrainedModelVersions(
            ListTrainedModelVersionsRequest(
                maxResults: maxResults,
                membershipIdentifier: membershipIdentifier,
                nextToken: nextToken,
                status: status,
                trainedModelArn: trainedModelArn
            ),
            logger: logger
        )
    }

    /// Lists the trained models in a membership.
    @Sendable
    @inlinable
    public func listTrainedModels(_ input: ListTrainedModelsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTrainedModelsResponse {
        return try await self.client.execute(
            operation: "ListTrainedModels", path: "/memberships/{membershipIdentifier}/trained-models",
            httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Lists the trained models in a membership.
    ///
    /// Parameters:
    ///   - maxResults: Maximum number of results returned per call.
    ///   - membershipIdentifier: Membership ID of the member that created the trained models of interest.
    ///   - nextToken: Token from a previous call, used to fetch the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listTrainedModels(
        maxResults: Int? = nil,
        membershipIdentifier: String,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListTrainedModelsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.listTrainedModels(
            ListTrainedModelsRequest(maxResults: maxResults, membershipIdentifier: membershipIdentifier, nextToken: nextToken),
            logger: logger
        )
    }

    /// Lists the available training datasets.
    @Sendable
    @inlinable
    public func listTrainingDatasets(_ input: ListTrainingDatasetsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTrainingDatasetsResponse {
        return try await self.client.execute(
            operation: "ListTrainingDatasets", path: "/training-dataset",
            httpMethod: .GET, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Lists the available training datasets.
    ///
    /// Parameters:
    ///   - maxResults: Maximum number of results returned per call.
    ///   - nextToken: Token from a previous call, used to fetch the next page of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listTrainingDatasets(
        maxResults: Int? = nil,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListTrainingDatasetsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.listTrainingDatasets(
            ListTrainingDatasetsRequest(maxResults: maxResults, nextToken: nextToken),
            logger: logger
        )
    }

    /// Creates or updates the IAM resource policy attached to a configured audience model.
    @Sendable
    @inlinable
    public func putConfiguredAudienceModelPolicy(_ input: PutConfiguredAudienceModelPolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutConfiguredAudienceModelPolicyResponse {
        return try await self.client.execute(
            operation: "PutConfiguredAudienceModelPolicy", path: "/configured-audience-model/{configuredAudienceModelArn}/policy",
            httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Creates or updates the IAM resource policy attached to a configured audience model.
    ///
    /// Parameters:
    ///   - configuredAudienceModelArn: The Amazon Resource Name (ARN) of the configured audience model governed by this resource policy.
    ///   - configuredAudienceModelPolicy: The IAM resource policy document.
    ///   - policyExistenceCondition: Guards against unexpected concurrent modification of the policy.
    ///   - previousPolicyHash: Cryptographic hash of the current policy contents, used to detect concurrent modification.
    ///   - logger: Logger used during operation
    @inlinable
    public func putConfiguredAudienceModelPolicy(
        configuredAudienceModelArn: String,
        configuredAudienceModelPolicy: String,
        policyExistenceCondition: PolicyExistenceCondition? = nil,
        previousPolicyHash: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> PutConfiguredAudienceModelPolicyResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.putConfiguredAudienceModelPolicy(
            PutConfiguredAudienceModelPolicyRequest(
                configuredAudienceModelArn: configuredAudienceModelArn,
                configuredAudienceModelPolicy: configuredAudienceModelPolicy,
                policyExistenceCondition: policyExistenceCondition,
                previousPolicyHash: previousPolicyHash
            ),
            logger: logger
        )
    }

    /// Assigns ML configuration information for a member.
    @Sendable
    @inlinable
    public func putMLConfiguration(_ input: PutMLConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        try await self.client.execute(
            operation: "PutMLConfiguration", path: "/memberships/{membershipIdentifier}/ml-configurations",
            httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Assigns ML configuration information for a member.
    ///
    /// Parameters:
    ///   - defaultOutputLocation: Default Amazon S3 location where ML output is stored for the specified member.
    ///   - membershipIdentifier: Membership ID of the member being configured.
    ///   - logger: Logger used during operation
    @inlinable
    public func putMLConfiguration(
        defaultOutputLocation: MLOutputConfiguration,
        membershipIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.putMLConfiguration(
            PutMLConfigurationRequest(defaultOutputLocation: defaultOutputLocation, membershipIdentifier: membershipIdentifier),
            logger: logger
        )
    }

    /// Exports an audience of a specified size once an audience has been generated.
    @Sendable
    @inlinable
    public func startAudienceExportJob(_ input: StartAudienceExportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        try await self.client.execute(
            operation: "StartAudienceExportJob", path: "/audience-export-job",
            httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Exports an audience of a specified size once an audience has been generated.
    ///
    /// Parameters:
    ///   - audienceGenerationJobArn: The Amazon Resource Name (ARN) of the audience generation job to export.
    ///   - audienceSize: The audience size setting for the export.
    ///   - description: Description of the audience export job.
    ///   - name: Name of the audience export job.
    ///   - logger: Logger used during operation
    @inlinable
    public func startAudienceExportJob(
        audienceGenerationJobArn: String,
        audienceSize: AudienceSize,
        description: String? = nil,
        name: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.startAudienceExportJob(
            StartAudienceExportJobRequest(
                audienceGenerationJobArn: audienceGenerationJobArn,
                audienceSize: audienceSize,
                description: description,
                name: name
            ),
            logger: logger
        )
    }

    /// Starts an audience generation job with the supplied information.
    @Sendable
    @inlinable
    public func startAudienceGenerationJob(_ input: StartAudienceGenerationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartAudienceGenerationJobResponse {
        return try await self.client.execute(
            operation: "StartAudienceGenerationJob", path: "/audience-generation-job",
            httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Starts an audience generation job with the supplied information.
    ///
    /// Parameters:
    ///   - collaborationId: Identifier of the collaboration that contains the audience generation job.
    ///   - configuredAudienceModelArn: The Amazon Resource Name (ARN) of the configured audience model used for this job.
    ///   - description: Description of the audience generation job.
    ///   - includeSeedInOutput: Whether the seed audience is included in the audience generation output.
    ///   - name: Name of the audience generation job.
    ///   - seedAudience: Seed audience used to generate the audience.
    ///   - tags: Optional metadata tags (key/value pairs) applied to the resource for categorization. At most 50 tags per resource; keys are unique per resource, up to 128 UTF-8 characters, values up to 256; keys and values are case sensitive; the aws: prefix (in any letter case) is reserved for AWS and such keys cannot be edited, deleted, or counted against the 50-tag limit.
    ///   - logger: Logger used during operation
    @inlinable
    public func startAudienceGenerationJob(
        collaborationId: String? = nil,
        configuredAudienceModelArn: String,
        description: String? = nil,
        includeSeedInOutput: Bool? = nil,
        name: String,
        seedAudience: AudienceGenerationJobDataSource,
        tags: [String: String]? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> StartAudienceGenerationJobResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.startAudienceGenerationJob(
            StartAudienceGenerationJobRequest(
                collaborationId: collaborationId,
                configuredAudienceModelArn: configuredAudienceModelArn,
                description: description,
                includeSeedInOutput: includeSeedInOutput,
                name: name,
                seedAudience: seedAudience,
                tags: tags
            ),
            logger: logger
        )
    }

    /// Starts a trained model export job with the supplied information.
    @Sendable
    @inlinable
    public func startTrainedModelExportJob(_ input: StartTrainedModelExportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws {
        try await self.client.execute(
            operation: "StartTrainedModelExportJob", path: "/memberships/{membershipIdentifier}/trained-models/{trainedModelArn}/export-jobs",
            httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Starts a trained model export job with the supplied information.
    ///
    /// Parameters:
    ///   - description: Description of the trained model export job.
    ///   - membershipIdentifier: Membership ID of the member receiving the exported trained model artifacts.
    ///   - name: Name of the trained model export job.
    ///   - outputConfiguration: Output configuration for the trained model export job.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model to export.
    ///   - trainedModelVersionIdentifier: Version identifier selecting which trained model version is exported to the destination.
    ///   - logger: Logger used during operation
    @inlinable
    public func startTrainedModelExportJob(
        description: String? = nil,
        membershipIdentifier: String,
        name: String,
        outputConfiguration: TrainedModelExportOutputConfiguration,
        trainedModelArn: String,
        trainedModelVersionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.startTrainedModelExportJob(
            StartTrainedModelExportJobRequest(
                description: description,
                membershipIdentifier: membershipIdentifier,
                name: name,
                outputConfiguration: outputConfiguration,
                trainedModelArn: trainedModelArn,
                trainedModelVersionIdentifier: trainedModelVersionIdentifier
            ),
            logger: logger
        )
    }

    /// Begins a trained model inference job with the supplied information.
    @Sendable
    @inlinable
    public func startTrainedModelInferenceJob(_ input: StartTrainedModelInferenceJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartTrainedModelInferenceJobResponse {
        return try await self.client.execute(
            operation: "StartTrainedModelInferenceJob", path: "/memberships/{membershipIdentifier}/trained-model-inference-jobs",
            httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Begins a trained model inference job with the supplied information.
    ///
    /// Parameters:
    ///   - configuredModelAlgorithmAssociationArn: The Amazon Resource Name (ARN) of the configured model algorithm association used for this job.
    ///   - containerExecutionParameters: Execution parameters for the container.
    ///   - dataSource: Data source used for the trained model inference job.
    ///   - description: Description of the trained model inference job.
    ///   - environment: Environment variables set in the Docker container.
    ///   - kmsKeyArn: The Amazon Resource Name (ARN) of the KMS key used to encrypt and decrypt customer-owned data in the ML inference job and associated data.
    ///   - membershipIdentifier: Membership ID of the membership that contains the trained model inference job.
    ///   - name: Name of the trained model inference job.
    ///   - outputConfiguration: Output configuration for the trained model inference job.
    ///   - resourceConfig: Resource configuration for the trained model inference job.
    ///   - tags: Optional metadata tags (key/value pairs) applied to the resource for categorization. At most 50 tags per resource; keys are unique per resource, up to 128 UTF-8 characters, values up to 256; keys and values are case sensitive; the aws: prefix (in any letter case) is reserved for AWS and such keys cannot be edited, deleted, or counted against the 50-tag limit.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model used for this job.
    ///   - trainedModelVersionIdentifier: Version identifier selecting which trained model version generates predictions on the input data.
    ///   - logger: Logger used during operation
    @inlinable
    public func startTrainedModelInferenceJob(
        configuredModelAlgorithmAssociationArn: String? = nil,
        containerExecutionParameters: InferenceContainerExecutionParameters? = nil,
        dataSource: ModelInferenceDataSource,
        description: String? = nil,
        environment: [String: String]? = nil,
        kmsKeyArn: String? = nil,
        membershipIdentifier: String,
        name: String,
        outputConfiguration: InferenceOutputConfiguration,
        resourceConfig: InferenceResourceConfig,
        tags: [String: String]? = nil,
        trainedModelArn: String,
        trainedModelVersionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> StartTrainedModelInferenceJobResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.startTrainedModelInferenceJob(
            StartTrainedModelInferenceJobRequest(
                configuredModelAlgorithmAssociationArn: configuredModelAlgorithmAssociationArn,
                containerExecutionParameters: containerExecutionParameters,
                dataSource: dataSource,
                description: description,
                environment: environment,
                kmsKeyArn: kmsKeyArn,
                membershipIdentifier: membershipIdentifier,
                name: name,
                outputConfiguration: outputConfiguration,
                resourceConfig: resourceConfig,
                tags: tags,
                trainedModelArn: trainedModelArn,
                trainedModelVersionIdentifier: trainedModelVersionIdentifier
            ),
            logger: logger
        )
    }

    /// Adds metadata tags to the specified resource.
    @Sendable
    @inlinable
    public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse {
        return try await self.client.execute(
            operation: "TagResource", path: "/tags/{resourceArn}",
            httpMethod: .POST, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Adds metadata tags to the specified resource.
    ///
    /// Parameters:
    ///   - resourceArn: The Amazon Resource Name (ARN) of the resource to tag.
    ///   - tags: Optional metadata tags (key/value pairs) applied to the resource for categorization. At most 50 tags per resource; keys are unique per resource, up to 128 UTF-8 characters, values up to 256; keys and values are case sensitive; the aws: prefix (in any letter case) is reserved for AWS and such keys cannot be edited, deleted, or counted against the 50-tag limit.
    ///   - logger: Logger used during operation
    @inlinable
    public func tagResource(
        resourceArn: String,
        tags: [String: String],
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> TagResourceResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.tagResource(
            TagResourceRequest(resourceArn: resourceArn, tags: tags),
            logger: logger
        )
    }

    /// Removes metadata tags from the specified resource.
    @Sendable
    @inlinable
    public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse {
        return try await self.client.execute(
            operation: "UntagResource", path: "/tags/{resourceArn}",
            httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Removes metadata tags from the specified resource.
    ///
    /// Parameters:
    ///   - resourceArn: The Amazon Resource Name (ARN) of the resource to remove tags from.
    ///   - tagKeys: Keys of the tags to remove.
    ///   - logger: Logger used during operation
    @inlinable
    public func untagResource(
        resourceArn: String,
        tagKeys: [String],
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> UntagResourceResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.untagResource(
            UntagResourceRequest(resourceArn: resourceArn, tagKeys: tagKeys),
            logger: logger
        )
    }

    /// Updates a configured audience model. Updates that affect audience generation jobs take effect when a new job starts; currently running jobs are not affected.
    @Sendable
    @inlinable
    public func updateConfiguredAudienceModel(_ input: UpdateConfiguredAudienceModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateConfiguredAudienceModelResponse {
        return try await self.client.execute(
            operation: "UpdateConfiguredAudienceModel", path: "/configured-audience-model/{configuredAudienceModelArn}",
            httpMethod: .PATCH, serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Updates a configured audience model. Updates that affect audience generation jobs take effect when a new job starts; currently running jobs are not affected.
    ///
    /// Parameters:
    ///   - audienceModelArn: The Amazon Resource Name (ARN) of the new audience model to use.
    ///   - audienceSizeConfig: New audience size configuration.
    ///   - configuredAudienceModelArn: The Amazon Resource Name (ARN) of the configured audience model to update.
    ///   - description: New description of the configured audience model.
    ///   - minMatchingSeedSize: Minimum number of users from the seed audience that must match users in the audience model's training data.
    ///   - outputConfig: New output configuration.
    ///   - sharedAudienceMetrics: New value controlling whether audience metrics are shared.
    ///   - logger: Logger used during operation
    @inlinable
    public func updateConfiguredAudienceModel(
        audienceModelArn: String? = nil,
        audienceSizeConfig: AudienceSizeConfig? = nil,
        configuredAudienceModelArn: String,
        description: String? = nil,
        minMatchingSeedSize: Int? = nil,
        outputConfig: ConfiguredAudienceModelOutputConfig? = nil,
        sharedAudienceMetrics: [SharedAudienceMetrics]? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> UpdateConfiguredAudienceModelResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        try await self.updateConfiguredAudienceModel(
            UpdateConfiguredAudienceModelRequest(
                audienceModelArn: audienceModelArn,
                audienceSizeConfig: audienceSizeConfig,
                configuredAudienceModelArn: configuredAudienceModelArn,
                description: description,
                minMatchingSeedSize: minMatchingSeedSize,
                outputConfig: outputConfig,
                sharedAudienceMetrics: sharedAudienceMetrics
            ),
            logger: logger
        )
    }
}

extension CleanRoomsML {
    /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. It cannot be called directly because
    /// `AWSServiceConfig.Patch` has no public initializers; use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead.
    public init(from: CleanRoomsML, patch: AWSServiceConfig.Patch) {
        // Reuse the source service's client, but derive a new configuration with the patch applied.
        self.config = from.config.with(patch: patch)
        self.client = from.client
    }
}

// MARK: Paginators

@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *)
extension CleanRoomsML {
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listAudienceExportJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listAudienceExportJobsPaginator(
        _ input: ListAudienceExportJobsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListAudienceExportJobsRequest, ListAudienceExportJobsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listAudienceExportJobs,
            inputKey: \ListAudienceExportJobsRequest.nextToken,
            outputKey: \ListAudienceExportJobsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listAudienceExportJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - audienceGenerationJobArn: The Amazon Resource Name (ARN) of the audience generation job that you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - logger: Logger used for logging
    @inlinable
    public func listAudienceExportJobsPaginator(
        audienceGenerationJobArn: String? = nil,
        maxResults: Int? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListAudienceExportJobsRequest, ListAudienceExportJobsResponse> {
        self.listAudienceExportJobsPaginator(
            ListAudienceExportJobsRequest(
                audienceGenerationJobArn: audienceGenerationJobArn,
                maxResults: maxResults
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listAudienceGenerationJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listAudienceGenerationJobsPaginator(
        _ input: ListAudienceGenerationJobsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListAudienceGenerationJobsRequest, ListAudienceGenerationJobsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listAudienceGenerationJobs,
            inputKey: \ListAudienceGenerationJobsRequest.nextToken,
            outputKey: \ListAudienceGenerationJobsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listAudienceGenerationJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - collaborationId: The identifier of the collaboration that contains the audience generation jobs that you are interested in.
    ///   - configuredAudienceModelArn: The Amazon Resource Name (ARN) of the configured audience model that was used for the audience generation jobs that you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - logger: Logger used for logging
    @inlinable
    public func listAudienceGenerationJobsPaginator(
        collaborationId: String? = nil,
        configuredAudienceModelArn: String? = nil,
        maxResults: Int? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListAudienceGenerationJobsRequest, ListAudienceGenerationJobsResponse> {
        self.listAudienceGenerationJobsPaginator(
            ListAudienceGenerationJobsRequest(
                collaborationId: collaborationId,
                configuredAudienceModelArn: configuredAudienceModelArn,
                maxResults: maxResults
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listAudienceModels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listAudienceModelsPaginator(
        _ input: ListAudienceModelsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListAudienceModelsRequest, ListAudienceModelsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listAudienceModels,
            inputKey: \ListAudienceModelsRequest.nextToken,
            outputKey: \ListAudienceModelsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listAudienceModels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - logger: Logger used for logging
    @inlinable
    public func listAudienceModelsPaginator(
        maxResults: Int? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListAudienceModelsRequest, ListAudienceModelsResponse> {
        self.listAudienceModelsPaginator(
            ListAudienceModelsRequest(
                maxResults: maxResults
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listCollaborationConfiguredModelAlgorithmAssociations(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listCollaborationConfiguredModelAlgorithmAssociationsPaginator(
        _ input: ListCollaborationConfiguredModelAlgorithmAssociationsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCollaborationConfiguredModelAlgorithmAssociationsRequest, ListCollaborationConfiguredModelAlgorithmAssociationsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listCollaborationConfiguredModelAlgorithmAssociations,
            inputKey: \ListCollaborationConfiguredModelAlgorithmAssociationsRequest.nextToken,
            outputKey: \ListCollaborationConfiguredModelAlgorithmAssociationsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listCollaborationConfiguredModelAlgorithmAssociations(_:logger:)``.
    ///
    /// - Parameters:
    ///   - collaborationIdentifier: The collaboration ID of the collaboration that contains the configured model algorithm associations that you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - logger: Logger used for logging
    @inlinable
    public func listCollaborationConfiguredModelAlgorithmAssociationsPaginator(
        collaborationIdentifier: String,
        maxResults: Int? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCollaborationConfiguredModelAlgorithmAssociationsRequest, ListCollaborationConfiguredModelAlgorithmAssociationsResponse> {
        self.listCollaborationConfiguredModelAlgorithmAssociationsPaginator(
            ListCollaborationConfiguredModelAlgorithmAssociationsRequest(
                collaborationIdentifier: collaborationIdentifier,
                maxResults: maxResults
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listCollaborationMLInputChannels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listCollaborationMLInputChannelsPaginator(
        _ input: ListCollaborationMLInputChannelsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCollaborationMLInputChannelsRequest, ListCollaborationMLInputChannelsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listCollaborationMLInputChannels,
            inputKey: \ListCollaborationMLInputChannelsRequest.nextToken,
            outputKey: \ListCollaborationMLInputChannelsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listCollaborationMLInputChannels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - collaborationIdentifier: The collaboration ID of the collaboration that contains the ML input channels that you want to list.
    ///   - maxResults: The maximum number of results to return.
    ///   - logger: Logger used for logging
    @inlinable
    public func listCollaborationMLInputChannelsPaginator(
        collaborationIdentifier: String,
        maxResults: Int? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCollaborationMLInputChannelsRequest, ListCollaborationMLInputChannelsResponse> {
        self.listCollaborationMLInputChannelsPaginator(
            ListCollaborationMLInputChannelsRequest(
                collaborationIdentifier: collaborationIdentifier,
                maxResults: maxResults
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listCollaborationTrainedModelExportJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listCollaborationTrainedModelExportJobsPaginator(
        _ input: ListCollaborationTrainedModelExportJobsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCollaborationTrainedModelExportJobsRequest, ListCollaborationTrainedModelExportJobsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listCollaborationTrainedModelExportJobs,
            inputKey: \ListCollaborationTrainedModelExportJobsRequest.nextToken,
            outputKey: \ListCollaborationTrainedModelExportJobsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listCollaborationTrainedModelExportJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - collaborationIdentifier: The collaboration ID of the collaboration that contains the trained model export jobs that you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model that was used to create the export jobs that you are interested in.
    ///   - trainedModelVersionIdentifier: The version identifier of the trained model to filter export jobs by. When specified, only export jobs for this specific version of the trained model are returned.
    ///   - logger: Logger used for logging
    @inlinable
    public func listCollaborationTrainedModelExportJobsPaginator(
        collaborationIdentifier: String,
        maxResults: Int? = nil,
        trainedModelArn: String,
        trainedModelVersionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCollaborationTrainedModelExportJobsRequest, ListCollaborationTrainedModelExportJobsResponse> {
        self.listCollaborationTrainedModelExportJobsPaginator(
            ListCollaborationTrainedModelExportJobsRequest(
                collaborationIdentifier: collaborationIdentifier,
                maxResults: maxResults,
                trainedModelArn: trainedModelArn,
                trainedModelVersionIdentifier: trainedModelVersionIdentifier
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listCollaborationTrainedModelInferenceJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listCollaborationTrainedModelInferenceJobsPaginator(
        _ input: ListCollaborationTrainedModelInferenceJobsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCollaborationTrainedModelInferenceJobsRequest, ListCollaborationTrainedModelInferenceJobsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listCollaborationTrainedModelInferenceJobs,
            inputKey: \ListCollaborationTrainedModelInferenceJobsRequest.nextToken,
            outputKey: \ListCollaborationTrainedModelInferenceJobsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listCollaborationTrainedModelInferenceJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - collaborationIdentifier: The collaboration ID of the collaboration that contains the trained model inference jobs that you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model that was used to create the trained model inference jobs that you are interested in.
    ///   - trainedModelVersionIdentifier: The version identifier of the trained model to filter inference jobs by. When specified, only inference jobs that used this specific version of the trained model are returned.
    ///   - logger: Logger used for logging
    @inlinable
    public func listCollaborationTrainedModelInferenceJobsPaginator(
        collaborationIdentifier: String,
        maxResults: Int? = nil,
        trainedModelArn: String? = nil,
        trainedModelVersionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCollaborationTrainedModelInferenceJobsRequest, ListCollaborationTrainedModelInferenceJobsResponse> {
        self.listCollaborationTrainedModelInferenceJobsPaginator(
            ListCollaborationTrainedModelInferenceJobsRequest(
                collaborationIdentifier: collaborationIdentifier,
                maxResults: maxResults,
                trainedModelArn: trainedModelArn,
                trainedModelVersionIdentifier: trainedModelVersionIdentifier
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listCollaborationTrainedModels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listCollaborationTrainedModelsPaginator(
        _ input: ListCollaborationTrainedModelsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCollaborationTrainedModelsRequest, ListCollaborationTrainedModelsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listCollaborationTrainedModels,
            inputKey: \ListCollaborationTrainedModelsRequest.nextToken,
            outputKey: \ListCollaborationTrainedModelsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listCollaborationTrainedModels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - collaborationIdentifier: The collaboration ID of the collaboration that contains the trained models you are interested in.
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - logger: Logger used for logging
    @inlinable
    public func listCollaborationTrainedModelsPaginator(
        collaborationIdentifier: String,
        maxResults: Int? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCollaborationTrainedModelsRequest, ListCollaborationTrainedModelsResponse> {
        self.listCollaborationTrainedModelsPaginator(
            ListCollaborationTrainedModelsRequest(
                collaborationIdentifier: collaborationIdentifier,
                maxResults: maxResults
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listConfiguredAudienceModels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listConfiguredAudienceModelsPaginator(
        _ input: ListConfiguredAudienceModelsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListConfiguredAudienceModelsRequest, ListConfiguredAudienceModelsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listConfiguredAudienceModels,
            inputKey: \ListConfiguredAudienceModelsRequest.nextToken,
            outputKey: \ListConfiguredAudienceModelsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listConfiguredAudienceModels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - logger: Logger used for logging
    @inlinable
    public func listConfiguredAudienceModelsPaginator(
        maxResults: Int? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListConfiguredAudienceModelsRequest, ListConfiguredAudienceModelsResponse> {
        self.listConfiguredAudienceModelsPaginator(
            ListConfiguredAudienceModelsRequest(
                maxResults: maxResults
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listConfiguredModelAlgorithmAssociations(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listConfiguredModelAlgorithmAssociationsPaginator(
        _ input: ListConfiguredModelAlgorithmAssociationsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListConfiguredModelAlgorithmAssociationsRequest, ListConfiguredModelAlgorithmAssociationsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listConfiguredModelAlgorithmAssociations,
            inputKey: \ListConfiguredModelAlgorithmAssociationsRequest.nextToken,
            outputKey: \ListConfiguredModelAlgorithmAssociationsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listConfiguredModelAlgorithmAssociations(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - membershipIdentifier: The membership ID of the member that created the configured model algorithm associations you are interested in.
    ///   - logger: Logger used for logging
    @inlinable
    public func listConfiguredModelAlgorithmAssociationsPaginator(
        maxResults: Int? = nil,
        membershipIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListConfiguredModelAlgorithmAssociationsRequest, ListConfiguredModelAlgorithmAssociationsResponse> {
        self.listConfiguredModelAlgorithmAssociationsPaginator(
            ListConfiguredModelAlgorithmAssociationsRequest(
                maxResults: maxResults,
                membershipIdentifier: membershipIdentifier
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listConfiguredModelAlgorithms(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listConfiguredModelAlgorithmsPaginator(
        _ input: ListConfiguredModelAlgorithmsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListConfiguredModelAlgorithmsRequest, ListConfiguredModelAlgorithmsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listConfiguredModelAlgorithms,
            inputKey: \ListConfiguredModelAlgorithmsRequest.nextToken,
            outputKey: \ListConfiguredModelAlgorithmsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listConfiguredModelAlgorithms(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - logger: Logger used for logging
    @inlinable
    public func listConfiguredModelAlgorithmsPaginator(
        maxResults: Int? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListConfiguredModelAlgorithmsRequest, ListConfiguredModelAlgorithmsResponse> {
        self.listConfiguredModelAlgorithmsPaginator(
            ListConfiguredModelAlgorithmsRequest(
                maxResults: maxResults
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listMLInputChannels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listMLInputChannelsPaginator(
        _ input: ListMLInputChannelsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListMLInputChannelsRequest, ListMLInputChannelsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listMLInputChannels,
            inputKey: \ListMLInputChannelsRequest.nextToken,
            outputKey: \ListMLInputChannelsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listMLInputChannels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum number of ML input channels to return.
    ///   - membershipIdentifier: The membership ID of the membership that contains the ML input channels that you want to list.
    ///   - logger: Logger used for logging
    @inlinable
    public func listMLInputChannelsPaginator(
        maxResults: Int? = nil,
        membershipIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListMLInputChannelsRequest, ListMLInputChannelsResponse> {
        self.listMLInputChannelsPaginator(
            ListMLInputChannelsRequest(
                maxResults: maxResults,
                membershipIdentifier: membershipIdentifier
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listTrainedModelInferenceJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listTrainedModelInferenceJobsPaginator(
        _ input: ListTrainedModelInferenceJobsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListTrainedModelInferenceJobsRequest, ListTrainedModelInferenceJobsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listTrainedModelInferenceJobs,
            inputKey: \ListTrainedModelInferenceJobsRequest.nextToken,
            outputKey: \ListTrainedModelInferenceJobsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listTrainedModelInferenceJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - membershipIdentifier: The membership
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of a trained model that was used to create the trained model inference jobs that you are interested in.
    ///   - trainedModelVersionIdentifier: The version identifier of the trained model to filter inference jobs by. When specified, only inference jobs that used this specific version of the trained model are returned.
    ///   - logger: Logger used for logging
    @inlinable
    public func listTrainedModelInferenceJobsPaginator(
        maxResults: Int? = nil,
        membershipIdentifier: String,
        trainedModelArn: String? = nil,
        trainedModelVersionIdentifier: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListTrainedModelInferenceJobsRequest, ListTrainedModelInferenceJobsResponse> {
        self.listTrainedModelInferenceJobsPaginator(
            ListTrainedModelInferenceJobsRequest(
                maxResults: maxResults,
                membershipIdentifier: membershipIdentifier,
                trainedModelArn: trainedModelArn,
                trainedModelVersionIdentifier: trainedModelVersionIdentifier
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listTrainedModelVersions(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listTrainedModelVersionsPaginator(
        _ input: ListTrainedModelVersionsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListTrainedModelVersionsRequest, ListTrainedModelVersionsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listTrainedModelVersions,
            inputKey: \ListTrainedModelVersionsRequest.nextToken,
            outputKey: \ListTrainedModelVersionsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listTrainedModelVersions(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum number of trained model versions to return in a single page. The default value is 10, and the maximum value is 100.
    ///   - membershipIdentifier: The membership identifier for the collaboration that contains the trained model.
    ///   - status: Filter the results to only include trained model versions with the specified status. Valid values include CREATE_PENDING, CREATE_IN_PROGRESS, ACTIVE, CREATE_FAILED, and others.
    ///   - trainedModelArn: The Amazon Resource Name (ARN) of the trained model for which to list versions.
    ///   - logger: Logger used for logging
    @inlinable
    public func listTrainedModelVersionsPaginator(
        maxResults: Int? = nil,
        membershipIdentifier: String,
        status: TrainedModelStatus? = nil,
        trainedModelArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListTrainedModelVersionsRequest, ListTrainedModelVersionsResponse> {
        self.listTrainedModelVersionsPaginator(
            ListTrainedModelVersionsRequest(
                maxResults: maxResults,
                membershipIdentifier: membershipIdentifier,
                status: status,
                trainedModelArn: trainedModelArn
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listTrainedModels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listTrainedModelsPaginator(
        _ input: ListTrainedModelsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListTrainedModelsRequest, ListTrainedModelsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listTrainedModels,
            inputKey: \ListTrainedModelsRequest.nextToken,
            outputKey: \ListTrainedModelsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listTrainedModels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - membershipIdentifier: The membership ID of the member that created the trained models you are interested in.
    ///   - logger: Logger used for logging
    @inlinable
    public func listTrainedModelsPaginator(
        maxResults: Int? = nil,
        membershipIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListTrainedModelsRequest, ListTrainedModelsResponse> {
        self.listTrainedModelsPaginator(
            ListTrainedModelsRequest(
                maxResults: maxResults,
                membershipIdentifier: membershipIdentifier
            ),
            logger: logger
        )
    }

    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listTrainingDatasets(_:logger:)``.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listTrainingDatasetsPaginator(
        _ input: ListTrainingDatasetsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListTrainingDatasetsRequest, ListTrainingDatasetsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listTrainingDatasets,
            inputKey: \ListTrainingDatasetsRequest.nextToken,
            outputKey: \ListTrainingDatasetsResponse.nextToken,
            logger: logger
        )
    }
    /// Create a ``AWSClient/PaginatorSequence`` that iterates the pages of ``listTrainingDatasets(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum size of the results that is returned per call.
    ///   - logger: Logger used for logging
    @inlinable
    public func listTrainingDatasetsPaginator(
        maxResults: Int? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListTrainingDatasetsRequest, ListTrainingDatasetsResponse> {
        self.listTrainingDatasetsPaginator(
            ListTrainingDatasetsRequest(
                maxResults: maxResults
            ),
            logger: logger
        )
    }
}

extension CleanRoomsML.ListAudienceExportJobsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`, used to fetch the next page.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListAudienceExportJobsRequest {
        CleanRoomsML.ListAudienceExportJobsRequest(
            audienceGenerationJobArn: self.audienceGenerationJobArn,
            maxResults: self.maxResults,
            nextToken: token
        )
    }
}

extension CleanRoomsML.ListAudienceGenerationJobsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`, used to fetch the next page.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListAudienceGenerationJobsRequest {
        CleanRoomsML.ListAudienceGenerationJobsRequest(
            collaborationId: self.collaborationId,
            configuredAudienceModelArn: self.configuredAudienceModelArn,
            maxResults: self.maxResults,
            nextToken: token
        )
    }
}

extension CleanRoomsML.ListAudienceModelsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`, used to fetch the next page.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListAudienceModelsRequest {
        CleanRoomsML.ListAudienceModelsRequest(
            maxResults: self.maxResults,
            nextToken: token
        )
    }
}

extension CleanRoomsML.ListCollaborationConfiguredModelAlgorithmAssociationsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`, used to fetch the next page.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListCollaborationConfiguredModelAlgorithmAssociationsRequest {
        CleanRoomsML.ListCollaborationConfiguredModelAlgorithmAssociationsRequest(
            collaborationIdentifier: self.collaborationIdentifier,
            maxResults: self.maxResults,
            nextToken: token
        )
    }
}

extension CleanRoomsML.ListCollaborationMLInputChannelsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`, used to fetch the next page.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListCollaborationMLInputChannelsRequest {
        CleanRoomsML.ListCollaborationMLInputChannelsRequest(
            collaborationIdentifier: self.collaborationIdentifier,
            maxResults: self.maxResults,
            nextToken: token
        )
    }
}

extension CleanRoomsML.ListCollaborationTrainedModelExportJobsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`, used to fetch the next page.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListCollaborationTrainedModelExportJobsRequest {
        CleanRoomsML.ListCollaborationTrainedModelExportJobsRequest(
            collaborationIdentifier: self.collaborationIdentifier,
            maxResults: self.maxResults,
            nextToken: token,
            trainedModelArn: self.trainedModelArn,
            trainedModelVersionIdentifier: self.trainedModelVersionIdentifier
        )
    }
}

extension CleanRoomsML.ListCollaborationTrainedModelInferenceJobsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` replaced by the supplied pagination token.
    /// All other fields are carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListCollaborationTrainedModelInferenceJobsRequest {
        CleanRoomsML.ListCollaborationTrainedModelInferenceJobsRequest(
            collaborationIdentifier: collaborationIdentifier,
            maxResults: maxResults,
            nextToken: token,
            trainedModelArn: trainedModelArn,
            trainedModelVersionIdentifier: trainedModelVersionIdentifier
        )
    }
}

extension CleanRoomsML.ListCollaborationTrainedModelsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` replaced by the supplied pagination token.
    /// All other fields are carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListCollaborationTrainedModelsRequest {
        CleanRoomsML.ListCollaborationTrainedModelsRequest(
            collaborationIdentifier: collaborationIdentifier,
            maxResults: maxResults,
            nextToken: token
        )
    }
}

extension CleanRoomsML.ListConfiguredAudienceModelsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` replaced by the supplied pagination token.
    /// All other fields are carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListConfiguredAudienceModelsRequest {
        CleanRoomsML.ListConfiguredAudienceModelsRequest(
            maxResults: maxResults,
            nextToken: token
        )
    }
}

extension CleanRoomsML.ListConfiguredModelAlgorithmAssociationsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` replaced by the supplied pagination token.
    /// All other fields are carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListConfiguredModelAlgorithmAssociationsRequest {
        CleanRoomsML.ListConfiguredModelAlgorithmAssociationsRequest(
            maxResults: maxResults,
            membershipIdentifier: membershipIdentifier,
            nextToken: token
        )
    }
}

extension CleanRoomsML.ListConfiguredModelAlgorithmsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` replaced by the supplied pagination token.
    /// All other fields are carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListConfiguredModelAlgorithmsRequest {
        CleanRoomsML.ListConfiguredModelAlgorithmsRequest(
            maxResults: maxResults,
            nextToken: token
        )
    }
}

extension CleanRoomsML.ListMLInputChannelsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` replaced by the supplied pagination token.
    /// All other fields are carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListMLInputChannelsRequest {
        CleanRoomsML.ListMLInputChannelsRequest(
            maxResults: maxResults,
            membershipIdentifier: membershipIdentifier,
            nextToken: token
        )
    }
}

extension CleanRoomsML.ListTrainedModelInferenceJobsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` replaced by the supplied pagination token.
    /// All other fields are carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListTrainedModelInferenceJobsRequest {
        CleanRoomsML.ListTrainedModelInferenceJobsRequest(
            maxResults: maxResults,
            membershipIdentifier: membershipIdentifier,
            nextToken: token,
            trainedModelArn: trainedModelArn,
            trainedModelVersionIdentifier: trainedModelVersionIdentifier
        )
    }
}

extension CleanRoomsML.ListTrainedModelVersionsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` replaced by the supplied pagination token.
    /// All other fields are carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListTrainedModelVersionsRequest {
        CleanRoomsML.ListTrainedModelVersionsRequest(
            maxResults: maxResults,
            membershipIdentifier: membershipIdentifier,
            nextToken: token,
            status: status,
            trainedModelArn: trainedModelArn
        )
    }
}

extension CleanRoomsML.ListTrainedModelsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` replaced by the supplied pagination token.
    /// All other fields are carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListTrainedModelsRequest {
        CleanRoomsML.ListTrainedModelsRequest(
            maxResults: maxResults,
            membershipIdentifier: membershipIdentifier,
            nextToken: token
        )
    }
}

extension CleanRoomsML.ListTrainingDatasetsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` replaced by the supplied pagination token.
    /// All other fields are carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> CleanRoomsML.ListTrainingDatasetsRequest {
        CleanRoomsML.ListTrainingDatasetsRequest(
            maxResults: maxResults,
            nextToken: token
        )
    }
}
