//===----------------------------------------------------------------------===//
//
// This source file is part of the Soto for AWS open source project
//
// Copyright (c) 2017-2024 the Soto project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of Soto project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator.
// DO NOT EDIT.

#if canImport(FoundationEssentials)
import FoundationEssentials
#else
import Foundation
#endif
@_exported import SotoCore

/// Service object for interacting with AWS Bedrock service.
///
/// Describes the API operations for creating, managing, fine-tuning, and evaluating Amazon Bedrock models.
public struct Bedrock: AWSService {
    // MARK: Member variables

    /// Client used for communication with AWS
    public let client: AWSClient
    /// Service configuration
    public let config: AWSServiceConfig

    // MARK: Initialization

    /// Initialize the Bedrock client.
    /// - parameters:
    ///     - client: AWSClient used to process requests
    ///     - region: Region of server you want to communicate with. This will override the partition parameter.
    ///     - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov).
    ///     - endpoint: Custom endpoint URL to use instead of standard AWS servers
    ///     - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded
    ///     - timeout: Timeout value for HTTP requests
    ///     - byteBufferAllocator: Allocator for ByteBuffers
    ///     - options: Service options
    public init(
        client: AWSClient,
        region: SotoCore.Region? = nil,
        partition: AWSPartition = .aws,
        endpoint: String? = nil,
        middleware: AWSMiddlewareProtocol? = nil,
        timeout: TimeAmount? = nil,
        byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(),
        options: AWSServiceConfig.Options = []
    ) {
        self.client = client
        // An explicitly-specified region implies its own partition; only fall back
        // to the partition parameter when no region was given.
        let resolvedPartition = region?.partition ?? partition
        self.config = AWSServiceConfig(
            region: region,
            partition: resolvedPartition,
            serviceName: "Bedrock",
            serviceIdentifier: "bedrock",
            serviceProtocol: .restjson,
            apiVersion: "2023-04-20",
            endpoint: endpoint,
            serviceEndpoints: Self.serviceEndpoints,
            errorType: BedrockErrorType.self,
            middleware: middleware,
            timeout: timeout,
            byteBufferAllocator: byteBufferAllocator,
            options: options
        )
    }


    /// Custom endpoint hostnames keyed by endpoint identifier.
    ///
    /// Covers the standard regional `bedrock` endpoints plus the `bedrock-runtime`,
    /// FIPS, and ISO (`c2s.ic.gov`) variants. Passed to `AWSServiceConfig` in `init`
    /// so region-specific requests resolve to the correct host.
    static var serviceEndpoints: [String: String] {[
        "bedrock-ap-northeast-1": "bedrock.ap-northeast-1.amazonaws.com",
        "bedrock-ap-northeast-2": "bedrock.ap-northeast-2.amazonaws.com",
        "bedrock-ap-northeast-3": "bedrock.ap-northeast-3.amazonaws.com",
        "bedrock-ap-south-1": "bedrock.ap-south-1.amazonaws.com",
        "bedrock-ap-south-2": "bedrock.ap-south-2.amazonaws.com",
        "bedrock-ap-southeast-1": "bedrock.ap-southeast-1.amazonaws.com",
        "bedrock-ap-southeast-2": "bedrock.ap-southeast-2.amazonaws.com",
        "bedrock-ca-central-1": "bedrock.ca-central-1.amazonaws.com",
        "bedrock-eu-central-1": "bedrock.eu-central-1.amazonaws.com",
        "bedrock-eu-central-2": "bedrock.eu-central-2.amazonaws.com",
        "bedrock-eu-north-1": "bedrock.eu-north-1.amazonaws.com",
        "bedrock-eu-south-1": "bedrock.eu-south-1.amazonaws.com",
        "bedrock-eu-south-2": "bedrock.eu-south-2.amazonaws.com",
        "bedrock-eu-west-1": "bedrock.eu-west-1.amazonaws.com",
        "bedrock-eu-west-2": "bedrock.eu-west-2.amazonaws.com",
        "bedrock-eu-west-3": "bedrock.eu-west-3.amazonaws.com",
        "bedrock-fips-ca-central-1": "bedrock-fips.ca-central-1.amazonaws.com",
        "bedrock-fips-us-east-1": "bedrock-fips.us-east-1.amazonaws.com",
        "bedrock-fips-us-east-2": "bedrock-fips.us-east-2.amazonaws.com",
        "bedrock-fips-us-gov-east-1": "bedrock-fips.us-gov-east-1.amazonaws.com",
        "bedrock-fips-us-gov-west-1": "bedrock-fips.us-gov-west-1.amazonaws.com",
        "bedrock-fips-us-west-2": "bedrock-fips.us-west-2.amazonaws.com",
        "bedrock-runtime-ap-northeast-1": "bedrock-runtime.ap-northeast-1.amazonaws.com",
        "bedrock-runtime-ap-northeast-2": "bedrock-runtime.ap-northeast-2.amazonaws.com",
        "bedrock-runtime-ap-northeast-3": "bedrock-runtime.ap-northeast-3.amazonaws.com",
        "bedrock-runtime-ap-south-1": "bedrock-runtime.ap-south-1.amazonaws.com",
        "bedrock-runtime-ap-south-2": "bedrock-runtime.ap-south-2.amazonaws.com",
        "bedrock-runtime-ap-southeast-1": "bedrock-runtime.ap-southeast-1.amazonaws.com",
        "bedrock-runtime-ap-southeast-2": "bedrock-runtime.ap-southeast-2.amazonaws.com",
        "bedrock-runtime-ca-central-1": "bedrock-runtime.ca-central-1.amazonaws.com",
        "bedrock-runtime-eu-central-1": "bedrock-runtime.eu-central-1.amazonaws.com",
        "bedrock-runtime-eu-central-2": "bedrock-runtime.eu-central-2.amazonaws.com",
        "bedrock-runtime-eu-north-1": "bedrock-runtime.eu-north-1.amazonaws.com",
        "bedrock-runtime-eu-south-1": "bedrock-runtime.eu-south-1.amazonaws.com",
        "bedrock-runtime-eu-south-2": "bedrock-runtime.eu-south-2.amazonaws.com",
        "bedrock-runtime-eu-west-1": "bedrock-runtime.eu-west-1.amazonaws.com",
        "bedrock-runtime-eu-west-2": "bedrock-runtime.eu-west-2.amazonaws.com",
        "bedrock-runtime-eu-west-3": "bedrock-runtime.eu-west-3.amazonaws.com",
        "bedrock-runtime-fips-ca-central-1": "bedrock-runtime-fips.ca-central-1.amazonaws.com",
        "bedrock-runtime-fips-us-east-1": "bedrock-runtime-fips.us-east-1.amazonaws.com",
        "bedrock-runtime-fips-us-east-2": "bedrock-runtime-fips.us-east-2.amazonaws.com",
        "bedrock-runtime-fips-us-gov-east-1": "bedrock-runtime-fips.us-gov-east-1.amazonaws.com",
        "bedrock-runtime-fips-us-gov-west-1": "bedrock-runtime-fips.us-gov-west-1.amazonaws.com",
        "bedrock-runtime-fips-us-west-2": "bedrock-runtime-fips.us-west-2.amazonaws.com",
        "bedrock-runtime-sa-east-1": "bedrock-runtime.sa-east-1.amazonaws.com",
        "bedrock-runtime-us-east-1": "bedrock-runtime.us-east-1.amazonaws.com",
        "bedrock-runtime-us-east-2": "bedrock-runtime.us-east-2.amazonaws.com",
        "bedrock-runtime-us-gov-east-1": "bedrock-runtime.us-gov-east-1.amazonaws.com",
        "bedrock-runtime-us-gov-west-1": "bedrock-runtime.us-gov-west-1.amazonaws.com",
        "bedrock-runtime-us-iso-east-1": "bedrock-runtime.us-iso-east-1.c2s.ic.gov",
        "bedrock-runtime-us-west-2": "bedrock-runtime.us-west-2.amazonaws.com",
        "bedrock-sa-east-1": "bedrock.sa-east-1.amazonaws.com",
        "bedrock-us-east-1": "bedrock.us-east-1.amazonaws.com",
        "bedrock-us-east-2": "bedrock.us-east-2.amazonaws.com",
        "bedrock-us-gov-east-1": "bedrock.us-gov-east-1.amazonaws.com",
        "bedrock-us-gov-west-1": "bedrock.us-gov-west-1.amazonaws.com",
        "bedrock-us-iso-east-1": "bedrock.us-iso-east-1.c2s.ic.gov",
        "bedrock-us-west-2": "bedrock.us-west-2.amazonaws.com"
    ]}



    // MARK: API Calls

    /// Deletes a batch of evaluation jobs. An evaluation job can only be deleted when its status is FAILED, COMPLETED, or STOPPED. Up to 25 model evaluation jobs can be deleted in a single request.
    @Sendable
    @inlinable
    public func batchDeleteEvaluationJob(_ input: BatchDeleteEvaluationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchDeleteEvaluationJobResponse {
        return try await client.execute(
            operation: "BatchDeleteEvaluationJob",
            path: "/evaluation-jobs/batch-delete",
            httpMethod: .POST,
            serviceConfig: config,
            input: input,
            logger: logger
        )
    }
    /// Deletes a batch of evaluation jobs. An evaluation job can only be deleted when its status is FAILED, COMPLETED, or STOPPED. Up to 25 model evaluation jobs can be deleted in a single request.
    ///
    /// Parameters:
    ///   - jobIdentifiers: A list of one or more evaluation job Amazon Resource Names (ARNs) you want to delete.
    ///   - logger: Logger used during operation
    @inlinable
    public func batchDeleteEvaluationJob(
        jobIdentifiers: [String],
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> BatchDeleteEvaluationJobResponse {
        // Build the request structure and delegate to the request-based overload.
        try await self.batchDeleteEvaluationJob(
            BatchDeleteEvaluationJobRequest(jobIdentifiers: jobIdentifiers),
            logger: logger
        )
    }

    /// Creates a new custom model in Amazon Bedrock. After the model is active, you can use it for inference. Inference requires purchasing Provisioned Throughput; on-demand inference is not available for these custom models (see Provisioned Throughput in the Amazon Bedrock User Guide). The model appears in ListCustomModels with a customizationType of imported; track its status with GetCustomModel. Possible states: Creating (validation and registration), Active (ready for inference), Failed (creation error). Related APIs: GetCustomModel, ListCustomModels, DeleteCustomModel.
    @Sendable
    @inlinable
    public func createCustomModel(_ input: CreateCustomModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCustomModelResponse {
        return try await client.execute(
            operation: "CreateCustomModel",
            path: "/custom-models/create-custom-model",
            httpMethod: .POST,
            serviceConfig: config,
            input: input,
            logger: logger
        )
    }
    /// Creates a new custom model in Amazon Bedrock. After the model is active, you can use it for inference. Inference requires purchasing Provisioned Throughput; on-demand inference is not available for these custom models (see Provisioned Throughput in the Amazon Bedrock User Guide). The model appears in ListCustomModels with a customizationType of imported; track its status with GetCustomModel. Possible states: Creating (validation and registration), Active (ready for inference), Failed (creation error). Related APIs: GetCustomModel, ListCustomModels, DeleteCustomModel.
    ///
    /// Parameters:
    ///   - clientRequestToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.
    ///   - modelKmsKeyArn: The Amazon Resource Name (ARN) of the customer managed KMS key to encrypt the custom model. If you don't provide a KMS key, Amazon Bedrock uses an Amazon Web Services-managed KMS key to encrypt the model. If you provide a customer managed KMS key, your Amazon Bedrock service role must have permissions to use it. For more information see Encryption of imported models.
    ///   - modelName: A unique name for the custom model.
    ///   - modelSourceConfig: The data source for the model. The Amazon S3 URI in the model source must be for the Amazon-managed Amazon S3 bucket containing your model artifacts.
    ///   - modelTags: A list of key-value pairs to associate with the custom model resource. You can use these tags to organize and identify your resources. For more information, see Tagging resources in the Amazon Bedrock User Guide.
    ///   - roleArn: The Amazon Resource Name (ARN) of an IAM service role that Amazon Bedrock assumes to perform tasks on your behalf. This role must have permissions to access the Amazon S3 bucket containing your model artifacts and the KMS key (if specified). For more information, see Setting up an IAM service role for importing models in the Amazon Bedrock User Guide.
    ///   - logger: Logger used during operation
    @inlinable
    public func createCustomModel(
        clientRequestToken: String? = CreateCustomModelRequest.idempotencyToken(),
        modelKmsKeyArn: String? = nil,
        modelName: String,
        modelSourceConfig: ModelDataSource,
        modelTags: [Tag]? = nil,
        roleArn: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateCustomModelResponse {
        // Assemble the request structure from the individual parameters and
        // delegate to the request-based overload.
        let request = CreateCustomModelRequest(
            clientRequestToken: clientRequestToken,
            modelKmsKeyArn: modelKmsKeyArn,
            modelName: modelName,
            modelSourceConfig: modelSourceConfig,
            modelTags: modelTags,
            roleArn: roleArn
        )
        return try await self.createCustomModel(request, logger: logger)
    }

    /// Deploys a custom model for on-demand inference in Amazon Bedrock. After deployment, use the deployment's Amazon Resource Name (ARN) as the modelId parameter when submitting prompts and generating responses with model inference. For setup details, see Set up inference for a custom model. Related actions: GetCustomModelDeployment, ListCustomModelDeployments, DeleteCustomModelDeployment.
    @Sendable
    @inlinable
    public func createCustomModelDeployment(_ input: CreateCustomModelDeploymentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCustomModelDeploymentResponse {
        return try await client.execute(
            operation: "CreateCustomModelDeployment",
            path: "/model-customization/custom-model-deployments",
            httpMethod: .POST,
            serviceConfig: config,
            input: input,
            logger: logger
        )
    }
    /// Deploys a custom model for on-demand inference in Amazon Bedrock. After deployment, use the deployment's Amazon Resource Name (ARN) as the modelId parameter when submitting prompts and generating responses with model inference. For setup details, see Set up inference for a custom model. Related actions: GetCustomModelDeployment, ListCustomModelDeployments, DeleteCustomModelDeployment.
    ///
    /// Parameters:
    ///   - clientRequestToken: A unique, case-sensitive identifier to ensure that the operation completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.
    ///   - description: A description for the custom model deployment to help you identify its purpose.
    ///   - modelArn: The Amazon Resource Name (ARN) of the custom model to deploy for on-demand inference. The custom model must be in the Active state.
    ///   - modelDeploymentName: The name for the custom model deployment. The name must be unique within your Amazon Web Services account and Region.
    ///   - tags: Tags to assign to the custom model deployment. You can use tags to organize and track your Amazon Web Services resources for cost allocation and management purposes.
    ///   - logger: Logger used during operation
    @inlinable
    public func createCustomModelDeployment(
        clientRequestToken: String? = CreateCustomModelDeploymentRequest.idempotencyToken(),
        description: String? = nil,
        modelArn: String,
        modelDeploymentName: String,
        tags: [Tag]? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateCustomModelDeploymentResponse {
        // Assemble the request structure and delegate to the request-based overload.
        let request = CreateCustomModelDeploymentRequest(
            clientRequestToken: clientRequestToken,
            description: description,
            modelArn: modelArn,
            modelDeploymentName: modelDeploymentName,
            tags: tags
        )
        return try await self.createCustomModelDeployment(request, logger: logger)
    }

    /// Creates an evaluation job.
    @Sendable
    @inlinable
    public func createEvaluationJob(_ input: CreateEvaluationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEvaluationJobResponse {
        return try await client.execute(
            operation: "CreateEvaluationJob",
            path: "/evaluation-jobs",
            httpMethod: .POST,
            serviceConfig: config,
            input: input,
            logger: logger
        )
    }
    /// Creates an evaluation job.
    ///
    /// Parameters:
    ///   - applicationType: Specifies whether the evaluation job is for evaluating a model or evaluating a knowledge base (retrieval and response generation).
    ///   - clientRequestToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.
    ///   - customerEncryptionKeyId: Specify your customer managed encryption key Amazon Resource Name (ARN) that will be used to encrypt your evaluation job.
    ///   - evaluationConfig: Contains the configuration details of either an automated or human-based evaluation job.
    ///   - inferenceConfig: Contains the configuration details of the inference model for the evaluation job. For model evaluation jobs, automated jobs support a single model or inference profile, and jobs that use human workers support two models or inference profiles.
    ///   - jobDescription: A description of the evaluation job.
    ///   - jobName: A name for the evaluation job. Names must be unique within your Amazon Web Services account and your account's Amazon Web Services region.
    ///   - jobTags: Tags to attach to the model evaluation job.
    ///   - outputDataConfig: Contains the configuration details of the Amazon S3 bucket for storing the results of the evaluation job.
    ///   - roleArn: The Amazon Resource Name (ARN) of an IAM service role that Amazon Bedrock can assume to perform tasks on your behalf. To learn more about the required permissions, see Required permissions for model evaluations.
    ///   - logger: Logger used during operation
    @inlinable
    public func createEvaluationJob(
        applicationType: ApplicationType? = nil,
        clientRequestToken: String? = CreateEvaluationJobRequest.idempotencyToken(),
        customerEncryptionKeyId: String? = nil,
        evaluationConfig: EvaluationConfig,
        inferenceConfig: EvaluationInferenceConfig,
        jobDescription: String? = nil,
        jobName: String,
        jobTags: [Tag]? = nil,
        outputDataConfig: EvaluationOutputDataConfig,
        roleArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateEvaluationJobResponse {
        // Assemble the request structure and delegate to the request-based overload.
        let request = CreateEvaluationJobRequest(
            applicationType: applicationType,
            clientRequestToken: clientRequestToken,
            customerEncryptionKeyId: customerEncryptionKeyId,
            evaluationConfig: evaluationConfig,
            inferenceConfig: inferenceConfig,
            jobDescription: jobDescription,
            jobName: jobName,
            jobTags: jobTags,
            outputDataConfig: outputDataConfig,
            roleArn: roleArn
        )
        return try await self.createEvaluationJob(request, logger: logger)
    }

    /// Request a model access agreement for the specified model.
    @Sendable
    @inlinable
    public func createFoundationModelAgreement(_ input: CreateFoundationModelAgreementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateFoundationModelAgreementResponse {
        return try await client.execute(
            operation: "CreateFoundationModelAgreement",
            path: "/create-foundation-model-agreement",
            httpMethod: .POST,
            serviceConfig: config,
            input: input,
            logger: logger
        )
    }
    /// Request a model access agreement for the specified model.
    ///
    /// Parameters:
    ///   - modelId: Model Id of the model for the access request.
    ///   - offerToken: An offer token encapsulates the information for an offer.
    ///   - logger: Logger used during operation
    @inlinable
    public func createFoundationModelAgreement(
        modelId: String,
        offerToken: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateFoundationModelAgreementResponse {
        // Build the request structure and delegate to the request-based overload.
        try await self.createFoundationModelAgreement(
            CreateFoundationModelAgreementRequest(modelId: modelId, offerToken: offerToken),
            logger: logger
        )
    }

    /// Creates a guardrail to block topics and to implement safeguards for your generative AI applications. You can configure the following policies in a guardrail to avoid undesirable and harmful content, filter out denied topics and words, and remove sensitive information for privacy protection. Content filters adjust filter strengths to block input prompts or model responses containing harmful content. Denied topics define a set of topics that are undesirable in the context of your application; these topics will be blocked if detected in user queries or model responses. Word filters block undesirable words, phrases, and profanity, such as offensive terms or competitor names. Sensitive information filters block or mask sensitive information such as personally identifiable information (PII) or custom regex in user inputs and model responses. You can also configure the messages returned to the user when an input or response violates the guardrail's policies. For more information, see Amazon Bedrock Guardrails in the Amazon Bedrock User Guide.
    @Sendable
    @inlinable
    public func createGuardrail(_ input: CreateGuardrailRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateGuardrailResponse {
        return try await client.execute(
            operation: "CreateGuardrail",
            path: "/guardrails",
            httpMethod: .POST,
            serviceConfig: config,
            input: input,
            logger: logger
        )
    }
    /// Creates a guardrail to block topics and to implement safeguards for your generative AI applications. You can configure the following policies in a guardrail to avoid undesirable and harmful content, filter out denied topics and words, and remove sensitive information for privacy protection. Content filters adjust filter strengths to block input prompts or model responses containing harmful content. Denied topics define a set of topics that are undesirable in the context of your application; these topics will be blocked if detected in user queries or model responses. Word filters block undesirable words, phrases, and profanity, such as offensive terms or competitor names. Sensitive information filters block or mask sensitive information such as personally identifiable information (PII) or custom regex in user inputs and model responses. You can also configure the messages returned to the user when an input or response violates the guardrail's policies. For more information, see Amazon Bedrock Guardrails in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - blockedInputMessaging: The message to return when the guardrail blocks a prompt.
    ///   - blockedOutputsMessaging: The message to return when the guardrail blocks a model response.
    ///   - clientRequestToken: A unique, case-sensitive identifier to ensure that the API request completes no more than once. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency in the Amazon S3 User Guide.
    ///   - contentPolicyConfig: The content filter policies to configure for the guardrail.
    ///   - contextualGroundingPolicyConfig: The contextual grounding policy configuration used to create a guardrail.
    ///   - crossRegionConfig: The system-defined guardrail profile that you're using with your guardrail. Guardrail profiles define the destination Amazon Web Services Regions where guardrail inference requests can be automatically routed. For more information, see the Amazon Bedrock User Guide.
    ///   - description: A description of the guardrail.
    ///   - kmsKeyId: The ARN of the KMS key that you use to encrypt the guardrail.
    ///   - name: The name to give the guardrail.
    ///   - sensitiveInformationPolicyConfig: The sensitive information policy to configure for the guardrail.
    ///   - tags: The tags that you want to attach to the guardrail.
    ///   - topicPolicyConfig: The topic policies to configure for the guardrail.
    ///   - wordPolicyConfig: The word policy you configure for the guardrail.
    ///   - logger: Logger used during operation
    @inlinable
    public func createGuardrail(
        blockedInputMessaging: String,
        blockedOutputsMessaging: String,
        clientRequestToken: String? = CreateGuardrailRequest.idempotencyToken(),
        contentPolicyConfig: GuardrailContentPolicyConfig? = nil,
        contextualGroundingPolicyConfig: GuardrailContextualGroundingPolicyConfig? = nil,
        crossRegionConfig: GuardrailCrossRegionConfig? = nil,
        description: String? = nil,
        kmsKeyId: String? = nil,
        name: String,
        sensitiveInformationPolicyConfig: GuardrailSensitiveInformationPolicyConfig? = nil,
        tags: [Tag]? = nil,
        topicPolicyConfig: GuardrailTopicPolicyConfig? = nil,
        wordPolicyConfig: GuardrailWordPolicyConfig? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateGuardrailResponse {
        // Assemble the request structure and delegate to the request-based overload.
        let request = CreateGuardrailRequest(
            blockedInputMessaging: blockedInputMessaging,
            blockedOutputsMessaging: blockedOutputsMessaging,
            clientRequestToken: clientRequestToken,
            contentPolicyConfig: contentPolicyConfig,
            contextualGroundingPolicyConfig: contextualGroundingPolicyConfig,
            crossRegionConfig: crossRegionConfig,
            description: description,
            kmsKeyId: kmsKeyId,
            name: name,
            sensitiveInformationPolicyConfig: sensitiveInformationPolicyConfig,
            tags: tags,
            topicPolicyConfig: topicPolicyConfig,
            wordPolicyConfig: wordPolicyConfig
        )
        return try await self.createGuardrail(request, logger: logger)
    }

    /// Creates a version of the guardrail. Use this API to create a snapshot of the guardrail when you are satisfied with a configuration, or to compare the configuration with another version.
    @Sendable
    @inlinable
    public func createGuardrailVersion(_ input: CreateGuardrailVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateGuardrailVersionResponse {
        return try await client.execute(
            operation: "CreateGuardrailVersion",
            path: "/guardrails/{guardrailIdentifier}",
            httpMethod: .POST,
            serviceConfig: config,
            input: input,
            logger: logger
        )
    }
    /// Creates a version of the guardrail. Use this API to create a snapshot of the guardrail when you are satisfied with a configuration, or to compare the configuration with another version.
    ///
    /// Parameters:
    ///   - clientRequestToken: A unique, case-sensitive identifier to ensure that the API request completes no more than once. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency in the Amazon S3 User Guide.
    ///   - description: A description of the guardrail version.
    ///   - guardrailIdentifier: The unique identifier of the guardrail. This can be an ID or the ARN.
    ///   - logger: Logger used during operation
    @inlinable
    public func createGuardrailVersion(
        clientRequestToken: String? = CreateGuardrailVersionRequest.idempotencyToken(),
        description: String? = nil,
        guardrailIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateGuardrailVersionResponse {
        // Assemble the request structure and delegate to the request-based overload.
        let request = CreateGuardrailVersionRequest(
            clientRequestToken: clientRequestToken,
            description: description,
            guardrailIdentifier: guardrailIdentifier
        )
        return try await self.createGuardrailVersion(request, logger: logger)
    }

    /// Creates an application inference profile to track metrics and costs when invoking a model. To create an application inference profile for a foundation model in one region, specify the ARN of the model in that region. To create an application inference profile for a foundation model across multiple regions, specify the ARN of the system-defined inference profile that contains the regions that you want to route requests to. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.
    @Sendable
    @inlinable
    public func createInferenceProfile(_ input: CreateInferenceProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateInferenceProfileResponse {
        return try await client.execute(
            operation: "CreateInferenceProfile",
            path: "/inference-profiles",
            httpMethod: .POST,
            serviceConfig: config,
            input: input,
            logger: logger
        )
    }
    /// Creates an application inference profile to track metrics and costs when invoking a model. To create an application inference profile for a foundation model in one region, specify the ARN of the model in that region. To create an application inference profile for a foundation model across multiple regions, specify the ARN of the system-defined inference profile that contains the regions that you want to route requests to. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - clientRequestToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.
    ///   - description: A description for the inference profile.
    ///   - inferenceProfileName: A name for the inference profile.
    ///   - modelSource: The foundation model or system-defined inference profile that the inference profile will track metrics and costs for.
    ///   - tags: An array of objects, each of which contains a tag and its value. For more information, see Tagging resources in the Amazon Bedrock User Guide.
    ///   - logger: Logger used during operation
    @inlinable
    public func createInferenceProfile(
        clientRequestToken: String? = CreateInferenceProfileRequest.idempotencyToken(),
        description: String? = nil,
        inferenceProfileName: String,
        modelSource: InferenceProfileModelSource,
        tags: [Tag]? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateInferenceProfileResponse {
        // Assemble the request structure and delegate to the request-based overload.
        let request = CreateInferenceProfileRequest(
            clientRequestToken: clientRequestToken,
            description: description,
            inferenceProfileName: inferenceProfileName,
            modelSource: modelSource,
            tags: tags
        )
        return try await self.createInferenceProfile(request, logger: logger)
    }

    /// Creates an endpoint for a model from Amazon Bedrock Marketplace. The endpoint is hosted by Amazon SageMaker.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `CreateMarketplaceModelEndpoint` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createMarketplaceModelEndpoint(_ input: CreateMarketplaceModelEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMarketplaceModelEndpointResponse {
        return try await self.client.execute(
            operation: "CreateMarketplaceModelEndpoint",
            path: "/marketplace-model/endpoints",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Creates an endpoint for a model from Amazon Bedrock Marketplace. The endpoint is hosted by Amazon SageMaker.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// - Parameters:
    ///   - acceptEula: Indicates whether you accept the end-user license agreement (EULA) for the model. Set to true to accept the EULA.
    ///   - clientRequestToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Amazon Web Services SDKs generate it automatically when omitted.
    ///   - endpointConfig: The configuration for the endpoint, including the number and type of instances to use.
    ///   - endpointName: The name of the endpoint. This name must be unique within your Amazon Web Services account and region.
    ///   - modelSourceIdentifier: The ARN of the model from Amazon Bedrock Marketplace that you want to deploy to the endpoint.
    ///   - tags: An array of key-value pairs to apply to the underlying Amazon SageMaker endpoint.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createMarketplaceModelEndpoint(
        acceptEula: Bool? = nil,
        clientRequestToken: String? = CreateMarketplaceModelEndpointRequest.idempotencyToken(),
        endpointConfig: EndpointConfig,
        endpointName: String,
        modelSourceIdentifier: String,
        tags: [Tag]? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateMarketplaceModelEndpointResponse {
        return try await self.createMarketplaceModelEndpoint(
            CreateMarketplaceModelEndpointRequest(
                acceptEula: acceptEula,
                clientRequestToken: clientRequestToken,
                endpointConfig: endpointConfig,
                endpointName: endpointName,
                modelSourceIdentifier: modelSourceIdentifier,
                tags: tags
            ),
            logger: logger
        )
    }

    /// Copies a model to another region so that it can be used there.
    ///
    /// For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `CreateModelCopyJob` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createModelCopyJob(_ input: CreateModelCopyJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateModelCopyJobResponse {
        return try await self.client.execute(
            operation: "CreateModelCopyJob",
            path: "/model-copy-jobs",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Copies a model to another region so that it can be used there.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// - Parameters:
    ///   - clientRequestToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request but does not return an error.
    ///   - modelKmsKeyId: The ARN of the KMS key that you use to encrypt the model copy.
    ///   - sourceModelArn: The Amazon Resource Name (ARN) of the model to be copied.
    ///   - targetModelName: A name for the copied model.
    ///   - targetModelTags: Tags to associate with the target model.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createModelCopyJob(
        clientRequestToken: String? = CreateModelCopyJobRequest.idempotencyToken(),
        modelKmsKeyId: String? = nil,
        sourceModelArn: String,
        targetModelName: String,
        targetModelTags: [Tag]? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateModelCopyJobResponse {
        return try await self.createModelCopyJob(
            CreateModelCopyJobRequest(
                clientRequestToken: clientRequestToken,
                modelKmsKeyId: modelKmsKeyId,
                sourceModelArn: sourceModelArn,
                targetModelName: targetModelName,
                targetModelTags: targetModelTags
            ),
            logger: logger
        )
    }

    /// Creates a fine-tuning job to customize a base model.
    ///
    /// You specify the base foundation model and the location of the training data. After the
    /// model-customization job completes successfully, your custom model resource will be ready to use.
    /// Amazon Bedrock returns validation loss metrics and output generations after the job completes.
    /// For information on the format of training and validation data, see Prepare the datasets.
    /// Model-customization jobs are asynchronous and the completion time depends on the base model and the
    /// training/validation data size. To monitor a job, use the GetModelCustomizationJob operation to
    /// retrieve the job status. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `CreateModelCustomizationJob` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createModelCustomizationJob(_ input: CreateModelCustomizationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateModelCustomizationJobResponse {
        return try await self.client.execute(
            operation: "CreateModelCustomizationJob",
            path: "/model-customization-jobs",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Creates a fine-tuning job to customize a base model.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// - Parameters:
    ///   - baseModelIdentifier: Name of the base model.
    ///   - clientRequestToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request but does not return an error.
    ///   - customizationConfig: The customization configuration for the model customization job.
    ///   - customizationType: The customization type.
    ///   - customModelKmsKeyId: The custom model is encrypted at rest using this key.
    ///   - customModelName: A name for the resulting custom model.
    ///   - customModelTags: Tags to attach to the resulting custom model.
    ///   - hyperParameters: Parameters related to tuning the model. For details on the format for different models, see Custom model hyperparameters.
    ///   - jobName: A name for the fine-tuning job.
    ///   - jobTags: Tags to attach to the job.
    ///   - outputDataConfig: S3 location for the output data.
    ///   - roleArn: The Amazon Resource Name (ARN) of an IAM service role that Amazon Bedrock can assume to perform tasks on your behalf. The caller of this API must have the iam:PassRole permission.
    ///   - trainingDataConfig: Information about the training dataset.
    ///   - validationDataConfig: Information about the validation dataset.
    ///   - vpcConfig: The configuration of the Virtual Private Cloud (VPC) that contains the resources that you're using for this job.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createModelCustomizationJob(
        baseModelIdentifier: String,
        clientRequestToken: String? = CreateModelCustomizationJobRequest.idempotencyToken(),
        customizationConfig: CustomizationConfig? = nil,
        customizationType: CustomizationType? = nil,
        customModelKmsKeyId: String? = nil,
        customModelName: String,
        customModelTags: [Tag]? = nil,
        hyperParameters: [String: String]? = nil,
        jobName: String,
        jobTags: [Tag]? = nil,
        outputDataConfig: OutputDataConfig,
        roleArn: String,
        trainingDataConfig: TrainingDataConfig,
        validationDataConfig: ValidationDataConfig? = nil,
        vpcConfig: VpcConfig? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateModelCustomizationJobResponse {
        return try await self.createModelCustomizationJob(
            CreateModelCustomizationJobRequest(
                baseModelIdentifier: baseModelIdentifier,
                clientRequestToken: clientRequestToken,
                customizationConfig: customizationConfig,
                customizationType: customizationType,
                customModelKmsKeyId: customModelKmsKeyId,
                customModelName: customModelName,
                customModelTags: customModelTags,
                hyperParameters: hyperParameters,
                jobName: jobName,
                jobTags: jobTags,
                outputDataConfig: outputDataConfig,
                roleArn: roleArn,
                trainingDataConfig: trainingDataConfig,
                validationDataConfig: validationDataConfig,
                vpcConfig: vpcConfig
            ),
            logger: logger
        )
    }

    /// Creates a model import job to import a model that you have customized in other environments,
    /// such as Amazon SageMaker. For more information, see Import a customized model.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `CreateModelImportJob` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createModelImportJob(_ input: CreateModelImportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateModelImportJobResponse {
        return try await self.client.execute(
            operation: "CreateModelImportJob",
            path: "/model-import-jobs",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Creates a model import job to import a model that you have customized in other environments,
    /// such as Amazon SageMaker. For more information, see Import a customized model.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// NOTE(review): unlike the other create operations in this file, `clientRequestToken` defaults to
    /// `nil` here rather than an auto-generated idempotency token — presumably this reflects the service
    /// model; verify against the codegen model before changing.
    ///
    /// - Parameters:
    ///   - clientRequestToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request but does not return an error.
    ///   - importedModelKmsKeyId: The imported model is encrypted at rest using this key.
    ///   - importedModelName: The name of the imported model.
    ///   - importedModelTags: Tags to attach to the imported model.
    ///   - jobName: The name of the import job.
    ///   - jobTags: Tags to attach to this import job.
    ///   - modelDataSource: The data source for the imported model.
    ///   - roleArn: The Amazon Resource Name (ARN) of the model import job. NOTE(review): this upstream description looks wrong — presumably the IAM service-role ARN that Amazon Bedrock assumes; verify against the AWS API reference.
    ///   - vpcConfig: VPC configuration parameters for the private Virtual Private Cloud (VPC) that contains the resources you are using for the import job.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createModelImportJob(
        clientRequestToken: String? = nil,
        importedModelKmsKeyId: String? = nil,
        importedModelName: String,
        importedModelTags: [Tag]? = nil,
        jobName: String,
        jobTags: [Tag]? = nil,
        modelDataSource: ModelDataSource,
        roleArn: String,
        vpcConfig: VpcConfig? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateModelImportJobResponse {
        return try await self.createModelImportJob(
            CreateModelImportJobRequest(
                clientRequestToken: clientRequestToken,
                importedModelKmsKeyId: importedModelKmsKeyId,
                importedModelName: importedModelName,
                importedModelTags: importedModelTags,
                jobName: jobName,
                jobTags: jobTags,
                modelDataSource: modelDataSource,
                roleArn: roleArn,
                vpcConfig: vpcConfig
            ),
            logger: logger
        )
    }

    /// Creates a batch inference job to invoke a model on multiple prompts.
    ///
    /// Format your data according to Format your inference data and upload it to an Amazon S3 bucket.
    /// For more information, see Process multiple prompts with batch inference. The response returns a
    /// jobArn that you can use to stop or get details about the job.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `CreateModelInvocationJob` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createModelInvocationJob(_ input: CreateModelInvocationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateModelInvocationJobResponse {
        return try await self.client.execute(
            operation: "CreateModelInvocationJob",
            path: "/model-invocation-job",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Creates a batch inference job to invoke a model on multiple prompts.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// - Parameters:
    ///   - clientRequestToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request but does not return an error.
    ///   - inputDataConfig: Details about the location of the input to the batch inference job.
    ///   - jobName: A name to give the batch inference job.
    ///   - modelId: The unique identifier of the foundation model to use for the batch inference job.
    ///   - outputDataConfig: Details about the location of the output of the batch inference job.
    ///   - roleArn: The Amazon Resource Name (ARN) of the service role with permissions to carry out and manage batch inference.
    ///   - tags: Any tags to associate with the batch inference job.
    ///   - timeoutDurationInHours: The number of hours after which to force the batch inference job to time out.
    ///   - vpcConfig: The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createModelInvocationJob(
        clientRequestToken: String? = CreateModelInvocationJobRequest.idempotencyToken(),
        inputDataConfig: ModelInvocationJobInputDataConfig,
        jobName: String,
        modelId: String,
        outputDataConfig: ModelInvocationJobOutputDataConfig,
        roleArn: String,
        tags: [Tag]? = nil,
        timeoutDurationInHours: Int? = nil,
        vpcConfig: VpcConfig? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateModelInvocationJobResponse {
        return try await self.createModelInvocationJob(
            CreateModelInvocationJobRequest(
                clientRequestToken: clientRequestToken,
                inputDataConfig: inputDataConfig,
                jobName: jobName,
                modelId: modelId,
                outputDataConfig: outputDataConfig,
                roleArn: roleArn,
                tags: tags,
                timeoutDurationInHours: timeoutDurationInHours,
                vpcConfig: vpcConfig
            ),
            logger: logger
        )
    }

    /// Creates a prompt router that manages the routing of requests between multiple foundation models
    /// based on the routing criteria.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `CreatePromptRouter` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createPromptRouter(_ input: CreatePromptRouterRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePromptRouterResponse {
        return try await self.client.execute(
            operation: "CreatePromptRouter",
            path: "/prompt-routers",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Creates a prompt router that manages the routing of requests between multiple foundation models
    /// based on the routing criteria.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// - Parameters:
    ///   - clientRequestToken: A unique, case-sensitive identifier that you provide to ensure idempotency of your requests. If not specified, the Amazon Web Services SDK automatically generates one for you.
    ///   - description: An optional description of the prompt router to help identify its purpose.
    ///   - fallbackModel: The default model to use when the routing criteria is not met.
    ///   - models: A list of foundation models that the prompt router can route requests to. At least one model must be specified.
    ///   - promptRouterName: The name of the prompt router. The name must be unique within your Amazon Web Services account in the current region.
    ///   - routingCriteria: The criteria, which is the response quality difference, used to determine how incoming requests are routed to different models.
    ///   - tags: An array of key-value pairs to apply to this resource as tags.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createPromptRouter(
        clientRequestToken: String? = CreatePromptRouterRequest.idempotencyToken(),
        description: String? = nil,
        fallbackModel: PromptRouterTargetModel,
        models: [PromptRouterTargetModel],
        promptRouterName: String,
        routingCriteria: RoutingCriteria,
        tags: [Tag]? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreatePromptRouterResponse {
        return try await self.createPromptRouter(
            CreatePromptRouterRequest(
                clientRequestToken: clientRequestToken,
                description: description,
                fallbackModel: fallbackModel,
                models: models,
                promptRouterName: promptRouterName,
                routingCriteria: routingCriteria,
                tags: tags
            ),
            logger: logger
        )
    }

    /// Creates dedicated throughput for a base or custom model with the model units and for the duration
    /// that you specify.
    ///
    /// For pricing details, see Amazon Bedrock Pricing. For more information, see Provisioned Throughput
    /// in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `CreateProvisionedModelThroughput` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func createProvisionedModelThroughput(_ input: CreateProvisionedModelThroughputRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateProvisionedModelThroughputResponse {
        return try await self.client.execute(
            operation: "CreateProvisionedModelThroughput",
            path: "/provisioned-model-throughput",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Creates dedicated throughput for a base or custom model with the model units and for the duration
    /// that you specify.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// - Parameters:
    ///   - clientRequestToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request but does not return an error. For more information, see Ensuring idempotency.
    ///   - commitmentDuration: The commitment duration requested for the Provisioned Throughput. Billing occurs hourly and is discounted for longer commitment terms. To request a no-commit Provisioned Throughput, omit this field.
    ///   - modelId: The Amazon Resource Name (ARN) or name of the model to associate with this Provisioned Throughput.
    ///   - modelUnits: Number of model units to allocate. A model unit delivers a specific throughput level for the specified model. By default, your account has no model units for purchasing Provisioned Throughputs with commitment; request them via the Amazon Web Services support center.
    ///   - provisionedModelName: The name for this Provisioned Throughput.
    ///   - tags: Tags to associate with this Provisioned Throughput.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func createProvisionedModelThroughput(
        clientRequestToken: String? = CreateProvisionedModelThroughputRequest.idempotencyToken(),
        commitmentDuration: CommitmentDuration? = nil,
        modelId: String,
        modelUnits: Int,
        provisionedModelName: String,
        tags: [Tag]? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> CreateProvisionedModelThroughputResponse {
        return try await self.createProvisionedModelThroughput(
            CreateProvisionedModelThroughputRequest(
                clientRequestToken: clientRequestToken,
                commitmentDuration: commitmentDuration,
                modelId: modelId,
                modelUnits: modelUnits,
                provisionedModelName: provisionedModelName,
                tags: tags
            ),
            logger: logger
        )
    }

    /// Deletes a custom model that you created earlier.
    ///
    /// For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `DeleteCustomModel` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func deleteCustomModel(_ input: DeleteCustomModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteCustomModelResponse {
        return try await self.client.execute(
            operation: "DeleteCustomModel",
            path: "/custom-models/{modelIdentifier}",
            httpMethod: .DELETE,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Deletes a custom model that you created earlier.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// - Parameters:
    ///   - modelIdentifier: Name of the model to delete.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func deleteCustomModel(
        modelIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> DeleteCustomModelResponse {
        return try await self.deleteCustomModel(
            DeleteCustomModelRequest(modelIdentifier: modelIdentifier),
            logger: logger
        )
    }

    /// Deletes a custom model deployment.
    ///
    /// This operation stops the deployment and removes it from your account. After deletion, the
    /// deployment ARN can no longer be used for inference requests. Related actions:
    /// CreateCustomModelDeployment, GetCustomModelDeployment, ListCustomModelDeployments.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `DeleteCustomModelDeployment` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func deleteCustomModelDeployment(_ input: DeleteCustomModelDeploymentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteCustomModelDeploymentResponse {
        return try await self.client.execute(
            operation: "DeleteCustomModelDeployment",
            path: "/model-customization/custom-model-deployments/{customModelDeploymentIdentifier}",
            httpMethod: .DELETE,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Deletes a custom model deployment.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// - Parameters:
    ///   - customModelDeploymentIdentifier: The Amazon Resource Name (ARN) or name of the custom model deployment to delete.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func deleteCustomModelDeployment(
        customModelDeploymentIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> DeleteCustomModelDeploymentResponse {
        return try await self.deleteCustomModelDeployment(
            DeleteCustomModelDeploymentRequest(customModelDeploymentIdentifier: customModelDeploymentIdentifier),
            logger: logger
        )
    }

    /// Deletes the model access agreement for the specified model.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `DeleteFoundationModelAgreement` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func deleteFoundationModelAgreement(_ input: DeleteFoundationModelAgreementRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteFoundationModelAgreementResponse {
        return try await self.client.execute(
            operation: "DeleteFoundationModelAgreement",
            path: "/delete-foundation-model-agreement",
            httpMethod: .POST,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Deletes the model access agreement for the specified model.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// - Parameters:
    ///   - modelId: Model Id of the model access to delete.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func deleteFoundationModelAgreement(
        modelId: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> DeleteFoundationModelAgreementResponse {
        return try await self.deleteFoundationModelAgreement(
            DeleteFoundationModelAgreementRequest(modelId: modelId),
            logger: logger
        )
    }

    /// Deletes a guardrail.
    ///
    /// - To delete a guardrail, only specify the ARN of the guardrail in the guardrailIdentifier field.
    ///   If you delete a guardrail, all of its versions will be deleted.
    /// - To delete a version of a guardrail, specify the ARN of the guardrail in the guardrailIdentifier
    ///   field and the version in the guardrailVersion field.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `DeleteGuardrail` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func deleteGuardrail(_ input: DeleteGuardrailRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteGuardrailResponse {
        return try await self.client.execute(
            operation: "DeleteGuardrail",
            path: "/guardrails/{guardrailIdentifier}",
            httpMethod: .DELETE,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Deletes a guardrail, or a single version of a guardrail when `guardrailVersion` is supplied.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// - Parameters:
    ///   - guardrailIdentifier: The unique identifier of the guardrail. This can be an ID or the ARN.
    ///   - guardrailVersion: The version of the guardrail.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func deleteGuardrail(
        guardrailIdentifier: String,
        guardrailVersion: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> DeleteGuardrailResponse {
        return try await self.deleteGuardrail(
            DeleteGuardrailRequest(
                guardrailIdentifier: guardrailIdentifier,
                guardrailVersion: guardrailVersion
            ),
            logger: logger
        )
    }

    /// Deletes a custom model that you imported earlier.
    ///
    /// For more information, see Import a customized model in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: Request parameters for the `DeleteImportedModel` operation.
    ///   - logger: Logger used during the operation.
    @Sendable
    @inlinable
    public func deleteImportedModel(_ input: DeleteImportedModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteImportedModelResponse {
        return try await self.client.execute(
            operation: "DeleteImportedModel",
            path: "/imported-models/{modelIdentifier}",
            httpMethod: .DELETE,
            serviceConfig: self.config,
            input: input,
            logger: logger
        )
    }
    /// Deletes a custom model that you imported earlier.
    ///
    /// Convenience overload that builds the request value from individual parameters.
    ///
    /// - Parameters:
    ///   - modelIdentifier: Name of the imported model to delete.
    ///   - logger: Logger used during the operation.
    @inlinable
    public func deleteImportedModel(
        modelIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> DeleteImportedModelResponse {
        return try await self.deleteImportedModel(
            DeleteImportedModelRequest(modelIdentifier: modelIdentifier),
            logger: logger
        )
    }

    /// Deletes an application inference profile. For more information, see Increase throughput and resilience with cross-region inference in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The DeleteInferenceProfile request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func deleteInferenceProfile(_ input: DeleteInferenceProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteInferenceProfileResponse {
        return try await self.client.execute(
            operation: "DeleteInferenceProfile", path: "/inference-profiles/{inferenceProfileIdentifier}", httpMethod: .DELETE,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Deletes an application inference profile. For more information, see Increase throughput and resilience with cross-region inference in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - inferenceProfileIdentifier: The Amazon Resource Name (ARN) or ID of the application inference profile to delete.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteInferenceProfile(
        inferenceProfileIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> DeleteInferenceProfileResponse {
        // Build the request value and delegate to the request-based overload.
        let request = DeleteInferenceProfileRequest(inferenceProfileIdentifier: inferenceProfileIdentifier)
        return try await self.deleteInferenceProfile(request, logger: logger)
    }

    /// Deletes an endpoint for a model from Amazon Bedrock Marketplace.
    ///
    /// - Parameters:
    ///   - input: The DeleteMarketplaceModelEndpoint request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func deleteMarketplaceModelEndpoint(_ input: DeleteMarketplaceModelEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteMarketplaceModelEndpointResponse {
        return try await self.client.execute(
            operation: "DeleteMarketplaceModelEndpoint", path: "/marketplace-model/endpoints/{endpointArn}", httpMethod: .DELETE,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Deletes an endpoint for a model from Amazon Bedrock Marketplace.
    ///
    /// - Parameters:
    ///   - endpointArn: The Amazon Resource Name (ARN) of the endpoint you want to delete.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteMarketplaceModelEndpoint(
        endpointArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> DeleteMarketplaceModelEndpointResponse {
        // Build the request value and delegate to the request-based overload.
        let request = DeleteMarketplaceModelEndpointRequest(endpointArn: endpointArn)
        return try await self.deleteMarketplaceModelEndpoint(request, logger: logger)
    }

    /// Delete the invocation logging.
    ///
    /// - Parameters:
    ///   - input: The DeleteModelInvocationLoggingConfiguration request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func deleteModelInvocationLoggingConfiguration(_ input: DeleteModelInvocationLoggingConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteModelInvocationLoggingConfigurationResponse {
        return try await self.client.execute(
            operation: "DeleteModelInvocationLoggingConfiguration", path: "/logging/modelinvocations", httpMethod: .DELETE,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Delete the invocation logging.
    ///
    /// - Parameters:
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteModelInvocationLoggingConfiguration(
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> DeleteModelInvocationLoggingConfigurationResponse {
        // This operation takes no request parameters; build an empty request and delegate.
        let request = DeleteModelInvocationLoggingConfigurationRequest()
        return try await self.deleteModelInvocationLoggingConfiguration(request, logger: logger)
    }

    /// Deletes a specified prompt router. This action cannot be undone.
    ///
    /// - Parameters:
    ///   - input: The DeletePromptRouter request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func deletePromptRouter(_ input: DeletePromptRouterRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeletePromptRouterResponse {
        return try await self.client.execute(
            operation: "DeletePromptRouter", path: "/prompt-routers/{promptRouterArn}", httpMethod: .DELETE,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Deletes a specified prompt router. This action cannot be undone.
    ///
    /// - Parameters:
    ///   - promptRouterArn: The Amazon Resource Name (ARN) of the prompt router to delete.
    ///   - logger: Logger used during operation
    @inlinable
    public func deletePromptRouter(
        promptRouterArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> DeletePromptRouterResponse {
        // Build the request value and delegate to the request-based overload.
        let request = DeletePromptRouterRequest(promptRouterArn: promptRouterArn)
        return try await self.deletePromptRouter(request, logger: logger)
    }

    /// Deletes a Provisioned Throughput. You can't delete a Provisioned Throughput before the commitment term is over. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The DeleteProvisionedModelThroughput request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func deleteProvisionedModelThroughput(_ input: DeleteProvisionedModelThroughputRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteProvisionedModelThroughputResponse {
        return try await self.client.execute(
            operation: "DeleteProvisionedModelThroughput", path: "/provisioned-model-throughput/{provisionedModelId}", httpMethod: .DELETE,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Deletes a Provisioned Throughput. You can't delete a Provisioned Throughput before the commitment term is over. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - provisionedModelId: The Amazon Resource Name (ARN) or name of the Provisioned Throughput.
    ///   - logger: Logger used during operation
    @inlinable
    public func deleteProvisionedModelThroughput(
        provisionedModelId: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> DeleteProvisionedModelThroughputResponse {
        // Build the request value and delegate to the request-based overload.
        let request = DeleteProvisionedModelThroughputRequest(provisionedModelId: provisionedModelId)
        return try await self.deleteProvisionedModelThroughput(request, logger: logger)
    }

    /// Deregisters an endpoint for a model from Amazon Bedrock Marketplace. This operation removes the endpoint's association with Amazon Bedrock but does not delete the underlying Amazon SageMaker endpoint.
    ///
    /// - Parameters:
    ///   - input: The DeregisterMarketplaceModelEndpoint request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func deregisterMarketplaceModelEndpoint(_ input: DeregisterMarketplaceModelEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeregisterMarketplaceModelEndpointResponse {
        return try await self.client.execute(
            operation: "DeregisterMarketplaceModelEndpoint", path: "/marketplace-model/endpoints/{endpointArn}/registration", httpMethod: .DELETE,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Deregisters an endpoint for a model from Amazon Bedrock Marketplace. This operation removes the endpoint's association with Amazon Bedrock but does not delete the underlying Amazon SageMaker endpoint.
    ///
    /// - Parameters:
    ///   - endpointArn: The Amazon Resource Name (ARN) of the endpoint you want to deregister.
    ///   - logger: Logger used during operation
    @inlinable
    public func deregisterMarketplaceModelEndpoint(
        endpointArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> DeregisterMarketplaceModelEndpointResponse {
        // Build the request value and delegate to the request-based overload.
        let request = DeregisterMarketplaceModelEndpointRequest(endpointArn: endpointArn)
        return try await self.deregisterMarketplaceModelEndpoint(request, logger: logger)
    }

    /// Get the properties associated with a Amazon Bedrock custom model that you have created. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The GetCustomModel request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getCustomModel(_ input: GetCustomModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetCustomModelResponse {
        return try await self.client.execute(
            operation: "GetCustomModel", path: "/custom-models/{modelIdentifier}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Get the properties associated with a Amazon Bedrock custom model that you have created. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - modelIdentifier: Name or Amazon Resource Name (ARN) of the custom model.
    ///   - logger: Logger used during operation
    @inlinable
    public func getCustomModel(
        modelIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetCustomModelResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetCustomModelRequest(modelIdentifier: modelIdentifier)
        return try await self.getCustomModel(request, logger: logger)
    }

    /// Retrieves information about a custom model deployment, including its status, configuration, and metadata. Use this operation to monitor the deployment status and retrieve details needed for inference requests. The following actions are related to the GetCustomModelDeployment operation:    CreateCustomModelDeployment     ListCustomModelDeployments     DeleteCustomModelDeployment
    ///
    /// - Parameters:
    ///   - input: The GetCustomModelDeployment request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getCustomModelDeployment(_ input: GetCustomModelDeploymentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetCustomModelDeploymentResponse {
        return try await self.client.execute(
            operation: "GetCustomModelDeployment", path: "/model-customization/custom-model-deployments/{customModelDeploymentIdentifier}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Retrieves information about a custom model deployment, including its status, configuration, and metadata. Use this operation to monitor the deployment status and retrieve details needed for inference requests. The following actions are related to the GetCustomModelDeployment operation:    CreateCustomModelDeployment     ListCustomModelDeployments     DeleteCustomModelDeployment
    ///
    /// - Parameters:
    ///   - customModelDeploymentIdentifier: The Amazon Resource Name (ARN) or name of the custom model deployment to retrieve information about.
    ///   - logger: Logger used during operation
    @inlinable
    public func getCustomModelDeployment(
        customModelDeploymentIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetCustomModelDeploymentResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetCustomModelDeploymentRequest(customModelDeploymentIdentifier: customModelDeploymentIdentifier)
        return try await self.getCustomModelDeployment(request, logger: logger)
    }

    /// Gets information about an evaluation job, such as the status of the job.
    ///
    /// - Parameters:
    ///   - input: The GetEvaluationJob request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getEvaluationJob(_ input: GetEvaluationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetEvaluationJobResponse {
        return try await self.client.execute(
            operation: "GetEvaluationJob", path: "/evaluation-jobs/{jobIdentifier}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Gets information about an evaluation job, such as the status of the job.
    ///
    /// - Parameters:
    ///   - jobIdentifier: The Amazon Resource Name (ARN) of the evaluation job you want get information on.
    ///   - logger: Logger used during operation
    @inlinable
    public func getEvaluationJob(
        jobIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetEvaluationJobResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetEvaluationJobRequest(jobIdentifier: jobIdentifier)
        return try await self.getEvaluationJob(request, logger: logger)
    }

    /// Get details about a Amazon Bedrock foundation model.
    ///
    /// - Parameters:
    ///   - input: The GetFoundationModel request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getFoundationModel(_ input: GetFoundationModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetFoundationModelResponse {
        return try await self.client.execute(
            operation: "GetFoundationModel", path: "/foundation-models/{modelIdentifier}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Get details about a Amazon Bedrock foundation model.
    ///
    /// - Parameters:
    ///   - modelIdentifier: The model identifier.
    ///   - logger: Logger used during operation
    @inlinable
    public func getFoundationModel(
        modelIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetFoundationModelResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetFoundationModelRequest(modelIdentifier: modelIdentifier)
        return try await self.getFoundationModel(request, logger: logger)
    }

    /// Get information about the Foundation model availability.
    ///
    /// - Parameters:
    ///   - input: The GetFoundationModelAvailability request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getFoundationModelAvailability(_ input: GetFoundationModelAvailabilityRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetFoundationModelAvailabilityResponse {
        return try await self.client.execute(
            operation: "GetFoundationModelAvailability", path: "/foundation-model-availability/{modelId}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Get information about the Foundation model availability.
    ///
    /// - Parameters:
    ///   - modelId: The model Id of the foundation model.
    ///   - logger: Logger used during operation
    @inlinable
    public func getFoundationModelAvailability(
        modelId: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetFoundationModelAvailabilityResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetFoundationModelAvailabilityRequest(modelId: modelId)
        return try await self.getFoundationModelAvailability(request, logger: logger)
    }

    /// Gets details about a guardrail. If you don't specify a version, the response returns details for the DRAFT version.
    ///
    /// - Parameters:
    ///   - input: The GetGuardrail request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getGuardrail(_ input: GetGuardrailRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetGuardrailResponse {
        return try await self.client.execute(
            operation: "GetGuardrail", path: "/guardrails/{guardrailIdentifier}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Gets details about a guardrail. If you don't specify a version, the response returns details for the DRAFT version.
    ///
    /// - Parameters:
    ///   - guardrailIdentifier: The unique identifier of the guardrail for which to get details. This can be an ID or the ARN.
    ///   - guardrailVersion: The version of the guardrail for which to get details. If you don't specify a version, the response returns details for the DRAFT version.
    ///   - logger: Logger used during operation
    @inlinable
    public func getGuardrail(
        guardrailIdentifier: String,
        guardrailVersion: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetGuardrailResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetGuardrailRequest(guardrailIdentifier: guardrailIdentifier, guardrailVersion: guardrailVersion)
        return try await self.getGuardrail(request, logger: logger)
    }

    /// Gets properties associated with a customized model you imported.
    ///
    /// - Parameters:
    ///   - input: The GetImportedModel request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getImportedModel(_ input: GetImportedModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetImportedModelResponse {
        return try await self.client.execute(
            operation: "GetImportedModel", path: "/imported-models/{modelIdentifier}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Gets properties associated with a customized model you imported.
    ///
    /// - Parameters:
    ///   - modelIdentifier: Name or Amazon Resource Name (ARN) of the imported model.
    ///   - logger: Logger used during operation
    @inlinable
    public func getImportedModel(
        modelIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetImportedModelResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetImportedModelRequest(modelIdentifier: modelIdentifier)
        return try await self.getImportedModel(request, logger: logger)
    }

    /// Gets information about an inference profile. For more information, see Increase throughput and resilience with cross-region inference in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The GetInferenceProfile request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getInferenceProfile(_ input: GetInferenceProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetInferenceProfileResponse {
        return try await self.client.execute(
            operation: "GetInferenceProfile", path: "/inference-profiles/{inferenceProfileIdentifier}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Gets information about an inference profile. For more information, see Increase throughput and resilience with cross-region inference in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - inferenceProfileIdentifier: The ID or Amazon Resource Name (ARN) of the inference profile.
    ///   - logger: Logger used during operation
    @inlinable
    public func getInferenceProfile(
        inferenceProfileIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetInferenceProfileResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetInferenceProfileRequest(inferenceProfileIdentifier: inferenceProfileIdentifier)
        return try await self.getInferenceProfile(request, logger: logger)
    }

    /// Retrieves details about a specific endpoint for a model from Amazon Bedrock Marketplace.
    ///
    /// - Parameters:
    ///   - input: The GetMarketplaceModelEndpoint request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getMarketplaceModelEndpoint(_ input: GetMarketplaceModelEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetMarketplaceModelEndpointResponse {
        return try await self.client.execute(
            operation: "GetMarketplaceModelEndpoint", path: "/marketplace-model/endpoints/{endpointArn}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Retrieves details about a specific endpoint for a model from Amazon Bedrock Marketplace.
    ///
    /// - Parameters:
    ///   - endpointArn: The Amazon Resource Name (ARN) of the endpoint you want to get information about.
    ///   - logger: Logger used during operation
    @inlinable
    public func getMarketplaceModelEndpoint(
        endpointArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetMarketplaceModelEndpointResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetMarketplaceModelEndpointRequest(endpointArn: endpointArn)
        return try await self.getMarketplaceModelEndpoint(request, logger: logger)
    }

    /// Retrieves information about a model copy job. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The GetModelCopyJob request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getModelCopyJob(_ input: GetModelCopyJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetModelCopyJobResponse {
        return try await self.client.execute(
            operation: "GetModelCopyJob", path: "/model-copy-jobs/{jobArn}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Retrieves information about a model copy job. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - jobArn: The Amazon Resource Name (ARN) of the model copy job.
    ///   - logger: Logger used during operation
    @inlinable
    public func getModelCopyJob(
        jobArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetModelCopyJobResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetModelCopyJobRequest(jobArn: jobArn)
        return try await self.getModelCopyJob(request, logger: logger)
    }

    /// Retrieves the properties associated with a model-customization job, including the status of the job. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The GetModelCustomizationJob request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getModelCustomizationJob(_ input: GetModelCustomizationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetModelCustomizationJobResponse {
        return try await self.client.execute(
            operation: "GetModelCustomizationJob", path: "/model-customization-jobs/{jobIdentifier}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Retrieves the properties associated with a model-customization job, including the status of the job. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - jobIdentifier: Identifier for the customization job.
    ///   - logger: Logger used during operation
    @inlinable
    public func getModelCustomizationJob(
        jobIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetModelCustomizationJobResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetModelCustomizationJobRequest(jobIdentifier: jobIdentifier)
        return try await self.getModelCustomizationJob(request, logger: logger)
    }

    /// Retrieves the properties associated with import model job, including the status of the job. For more information, see Import a customized model in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The GetModelImportJob request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getModelImportJob(_ input: GetModelImportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetModelImportJobResponse {
        return try await self.client.execute(
            operation: "GetModelImportJob", path: "/model-import-jobs/{jobIdentifier}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Retrieves the properties associated with import model job, including the status of the job. For more information, see Import a customized model in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - jobIdentifier: The identifier of the import job.
    ///   - logger: Logger used during operation
    @inlinable
    public func getModelImportJob(
        jobIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetModelImportJobResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetModelImportJobRequest(jobIdentifier: jobIdentifier)
        return try await self.getModelImportJob(request, logger: logger)
    }

    /// Gets details about a batch inference job. For more information, see Monitor batch inference jobs
    ///
    /// - Parameters:
    ///   - input: The GetModelInvocationJob request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getModelInvocationJob(_ input: GetModelInvocationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetModelInvocationJobResponse {
        return try await self.client.execute(
            operation: "GetModelInvocationJob", path: "/model-invocation-job/{jobIdentifier}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Gets details about a batch inference job. For more information, see Monitor batch inference jobs
    ///
    /// - Parameters:
    ///   - jobIdentifier: The Amazon Resource Name (ARN) of the batch inference job.
    ///   - logger: Logger used during operation
    @inlinable
    public func getModelInvocationJob(
        jobIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetModelInvocationJobResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetModelInvocationJobRequest(jobIdentifier: jobIdentifier)
        return try await self.getModelInvocationJob(request, logger: logger)
    }

    /// Get the current configuration values for model invocation logging.
    ///
    /// - Parameters:
    ///   - input: The GetModelInvocationLoggingConfiguration request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getModelInvocationLoggingConfiguration(_ input: GetModelInvocationLoggingConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetModelInvocationLoggingConfigurationResponse {
        return try await self.client.execute(
            operation: "GetModelInvocationLoggingConfiguration", path: "/logging/modelinvocations", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Get the current configuration values for model invocation logging.
    ///
    /// - Parameters:
    ///   - logger: Logger used during operation
    @inlinable
    public func getModelInvocationLoggingConfiguration(
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetModelInvocationLoggingConfigurationResponse {
        // This operation takes no request parameters; build an empty request and delegate.
        let request = GetModelInvocationLoggingConfigurationRequest()
        return try await self.getModelInvocationLoggingConfiguration(request, logger: logger)
    }

    /// Retrieves details about a prompt router.
    ///
    /// - Parameters:
    ///   - input: The GetPromptRouter request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getPromptRouter(_ input: GetPromptRouterRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetPromptRouterResponse {
        return try await self.client.execute(
            operation: "GetPromptRouter", path: "/prompt-routers/{promptRouterArn}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Retrieves details about a prompt router.
    ///
    /// - Parameters:
    ///   - promptRouterArn: The prompt router's ARN
    ///   - logger: Logger used during operation
    @inlinable
    public func getPromptRouter(
        promptRouterArn: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetPromptRouterResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetPromptRouterRequest(promptRouterArn: promptRouterArn)
        return try await self.getPromptRouter(request, logger: logger)
    }

    /// Returns details for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The GetProvisionedModelThroughput request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getProvisionedModelThroughput(_ input: GetProvisionedModelThroughputRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetProvisionedModelThroughputResponse {
        return try await self.client.execute(
            operation: "GetProvisionedModelThroughput", path: "/provisioned-model-throughput/{provisionedModelId}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Returns details for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - provisionedModelId: The Amazon Resource Name (ARN) or name of the Provisioned Throughput.
    ///   - logger: Logger used during operation
    @inlinable
    public func getProvisionedModelThroughput(
        provisionedModelId: String,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetProvisionedModelThroughputResponse {
        // Build the request value and delegate to the request-based overload.
        let request = GetProvisionedModelThroughputRequest(provisionedModelId: provisionedModelId)
        return try await self.getProvisionedModelThroughput(request, logger: logger)
    }

    /// Get usecase for model access.
    ///
    /// - Parameters:
    ///   - input: The GetUseCaseForModelAccess request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func getUseCaseForModelAccess(_ input: GetUseCaseForModelAccessRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetUseCaseForModelAccessResponse {
        return try await self.client.execute(
            operation: "GetUseCaseForModelAccess", path: "/use-case-for-model-access", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Get usecase for model access.
    ///
    /// - Parameters:
    ///   - logger: Logger used during operation
    @inlinable
    public func getUseCaseForModelAccess(
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> GetUseCaseForModelAccessResponse {
        // This operation takes no request parameters; build an empty request and delegate.
        let request = GetUseCaseForModelAccessRequest()
        return try await self.getUseCaseForModelAccess(request, logger: logger)
    }

    /// Lists custom model deployments in your account. You can filter the results by creation time, name, status, and associated model. Use this operation to manage and monitor your custom model deployments. We recommend using pagination to ensure that the operation returns quickly and successfully. The following actions are related to the ListCustomModelDeployments operation:    CreateCustomModelDeployment     GetCustomModelDeployment     DeleteCustomModelDeployment
    ///
    /// - Parameters:
    ///   - input: The ListCustomModelDeployments request payload
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listCustomModelDeployments(_ input: ListCustomModelDeploymentsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCustomModelDeploymentsResponse {
        return try await self.client.execute(
            operation: "ListCustomModelDeployments", path: "/model-customization/custom-model-deployments", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Lists custom model deployments in your account. You can filter the results by creation time, name, status, and associated model. Use this operation to manage and monitor your custom model deployments. We recommend using pagination to ensure that the operation returns quickly and successfully. The following actions are related to the ListCustomModelDeployments operation:    CreateCustomModelDeployment     GetCustomModelDeployment     DeleteCustomModelDeployment
    ///
    /// - Parameters:
    ///   - createdAfter: Filters deployments created after the specified date and time.
    ///   - createdBefore: Filters deployments created before the specified date and time.
    ///   - maxResults: The maximum number of results to return in a single call.
    ///   - modelArnEquals: Filters deployments by the Amazon Resource Name (ARN) of the associated custom model.
    ///   - nameContains: Filters deployments whose names contain the specified string.
    ///   - nextToken: The token for the next set of results. Use this token to retrieve additional results when the response is truncated.
    ///   - sortBy: The field to sort the results by. The only supported value is CreationTime.
    ///   - sortOrder: The sort order for the results. Valid values are Ascending and Descending. Default is Descending.
    ///   - statusEquals: Filters deployments by status. Valid values are CREATING, ACTIVE, and FAILED.
    ///   - logger: Logger used during operation
    @inlinable
    public func listCustomModelDeployments(
        createdAfter: Date? = nil,
        createdBefore: Date? = nil,
        maxResults: Int? = nil,
        modelArnEquals: String? = nil,
        nameContains: String? = nil,
        nextToken: String? = nil,
        sortBy: SortModelsBy? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: CustomModelDeploymentStatus? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListCustomModelDeploymentsResponse {
        // Assemble the filter/pagination parameters into a request and delegate to the request-based overload.
        let request = ListCustomModelDeploymentsRequest(
            createdAfter: createdAfter,
            createdBefore: createdBefore,
            maxResults: maxResults,
            modelArnEquals: modelArnEquals,
            nameContains: nameContains,
            nextToken: nextToken,
            sortBy: sortBy,
            sortOrder: sortOrder,
            statusEquals: statusEquals
        )
        return try await self.listCustomModelDeployments(request, logger: logger)
    }

    /// Returns a list of the custom models that you have created with the CreateModelCustomizationJob operation. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListCustomModelsResponse``
    @Sendable
    @inlinable
    public func listCustomModels(_ input: ListCustomModelsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCustomModelsResponse {
        // Forward the request through the shared AWSClient request pipeline.
        return try await self.client.execute(
            operation: "ListCustomModels", path: "/custom-models", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Returns a list of the custom models that you have created with the CreateModelCustomizationJob operation. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - baseModelArnEquals: Return custom models only if the base model Amazon Resource Name (ARN) matches this parameter.
    ///   - creationTimeAfter: Return custom models created after the specified time.
    ///   - creationTimeBefore: Return custom models created before the specified time.
    ///   - foundationModelArnEquals: Return custom models only if the foundation model Amazon Resource Name (ARN) matches this parameter.
    ///   - isOwned: Return custom models depending on if the current account owns them (true) or if they were shared with the current account (false).
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - modelStatus: The status of the model to filter results by. Possible values include:    Creating - Include only models that are currently being created and validated.    Active - Include only models that have been successfully created and are ready for use.    Failed - Include only models where the creation process failed.   If you don't specify a status, the API returns models in all states.
    ///   - nameContains: Return custom models only if the job name contains these characters.
    ///   - nextToken: If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.
    ///   - sortBy: The field to sort by in the returned list of models.
    ///   - sortOrder: The sort order of the results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listCustomModels(
        baseModelArnEquals: String? = nil,
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        foundationModelArnEquals: String? = nil,
        isOwned: Bool? = nil,
        maxResults: Int? = nil,
        modelStatus: ModelStatus? = nil,
        nameContains: String? = nil,
        nextToken: String? = nil,
        sortBy: SortModelsBy? = nil,
        sortOrder: SortOrder? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListCustomModelsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        return try await self.listCustomModels(
            ListCustomModelsRequest(
                baseModelArnEquals: baseModelArnEquals,
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                foundationModelArnEquals: foundationModelArnEquals,
                isOwned: isOwned,
                maxResults: maxResults,
                modelStatus: modelStatus,
                nameContains: nameContains,
                nextToken: nextToken,
                sortBy: sortBy,
                sortOrder: sortOrder
            ),
            logger: logger
        )
    }

    /// Lists all existing evaluation jobs.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListEvaluationJobsResponse``
    @Sendable
    @inlinable
    public func listEvaluationJobs(_ input: ListEvaluationJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListEvaluationJobsResponse {
        // Forward the request through the shared AWSClient request pipeline.
        return try await self.client.execute(
            operation: "ListEvaluationJobs", path: "/evaluation-jobs", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Lists all existing evaluation jobs.
    ///
    /// Parameters:
    ///   - applicationTypeEquals: A filter to only list evaluation jobs that are either model evaluations or knowledge base evaluations.
    ///   - creationTimeAfter: A filter to only list evaluation jobs created after a specified time.
    ///   - creationTimeBefore: A filter to only list evaluation jobs created before a specified time.
    ///   - maxResults: The maximum number of results to return.
    ///   - nameContains: A filter to only list evaluation jobs that contain a specified string in the job name.
    ///   - nextToken: Continuation token from the previous response, for Amazon Bedrock to list the next set of results.
    ///   - sortBy: Specifies a creation time to sort the list of evaluation jobs by when they were created.
    ///   - sortOrder: Specifies whether to sort the list of evaluation jobs by either ascending or descending order.
    ///   - statusEquals: A filter to only list evaluation jobs that are of a certain status.
    ///   - logger: Logger used during operation
    @inlinable
    public func listEvaluationJobs(
        applicationTypeEquals: ApplicationType? = nil,
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        nameContains: String? = nil,
        nextToken: String? = nil,
        sortBy: SortJobsBy? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: EvaluationJobStatus? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListEvaluationJobsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        return try await self.listEvaluationJobs(
            ListEvaluationJobsRequest(
                applicationTypeEquals: applicationTypeEquals,
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                maxResults: maxResults,
                nameContains: nameContains,
                nextToken: nextToken,
                sortBy: sortBy,
                sortOrder: sortOrder,
                statusEquals: statusEquals
            ),
            logger: logger
        )
    }

    /// Get the offers associated with the specified model.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListFoundationModelAgreementOffersResponse``
    @Sendable
    @inlinable
    public func listFoundationModelAgreementOffers(_ input: ListFoundationModelAgreementOffersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListFoundationModelAgreementOffersResponse {
        // Forward the request through the shared AWSClient request pipeline; {modelId} is filled from the request.
        return try await self.client.execute(
            operation: "ListFoundationModelAgreementOffers", path: "/list-foundation-model-agreement-offers/{modelId}", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Get the offers associated with the specified model.
    ///
    /// Parameters:
    ///   - modelId: Model Id of the foundation model.
    ///   - offerType: Type of offer associated with the model.
    ///   - logger: Logger used during operation
    @inlinable
    public func listFoundationModelAgreementOffers(
        modelId: String,
        offerType: OfferType? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListFoundationModelAgreementOffersResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        return try await self.listFoundationModelAgreementOffers(
            ListFoundationModelAgreementOffersRequest(modelId: modelId, offerType: offerType),
            logger: logger
        )
    }

    /// Lists Amazon Bedrock foundation models that you can use. You can filter the results with the request parameters. For more information, see Foundation models in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListFoundationModelsResponse``
    @Sendable
    @inlinable
    public func listFoundationModels(_ input: ListFoundationModelsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListFoundationModelsResponse {
        // Forward the request through the shared AWSClient request pipeline.
        return try await self.client.execute(
            operation: "ListFoundationModels", path: "/foundation-models", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Lists Amazon Bedrock foundation models that you can use. You can filter the results with the request parameters. For more information, see Foundation models in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - byCustomizationType: Return models that support the customization type that you specify. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///   - byInferenceType: Return models that support the inference type that you specify. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.
    ///   - byOutputModality: Return models that support the output modality that you specify.
    ///   - byProvider: Return models belonging to the model provider that you specify.
    ///   - logger: Logger used during operation
    @inlinable
    public func listFoundationModels(
        byCustomizationType: ModelCustomization? = nil,
        byInferenceType: InferenceType? = nil,
        byOutputModality: ModelModality? = nil,
        byProvider: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListFoundationModelsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        return try await self.listFoundationModels(
            ListFoundationModelsRequest(
                byCustomizationType: byCustomizationType,
                byInferenceType: byInferenceType,
                byOutputModality: byOutputModality,
                byProvider: byProvider
            ),
            logger: logger
        )
    }

    /// Lists details about all the guardrails in an account. To list the DRAFT version of all your guardrails, don't specify the guardrailIdentifier field. To list all versions of a guardrail, specify the ARN of the guardrail in the guardrailIdentifier field. You can set the maximum number of results to return in a response in the maxResults field. If there are more results than the number you set, the response returns a nextToken that you can send in another ListGuardrails request to see the next batch of results.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListGuardrailsResponse``
    @Sendable
    @inlinable
    public func listGuardrails(_ input: ListGuardrailsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListGuardrailsResponse {
        // Forward the request through the shared AWSClient request pipeline.
        return try await self.client.execute(
            operation: "ListGuardrails", path: "/guardrails", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Lists details about all the guardrails in an account. To list the DRAFT version of all your guardrails, don't specify the guardrailIdentifier field. To list all versions of a guardrail, specify the ARN of the guardrail in the guardrailIdentifier field. You can set the maximum number of results to return in a response in the maxResults field. If there are more results than the number you set, the response returns a nextToken that you can send in another ListGuardrails request to see the next batch of results.
    ///
    /// Parameters:
    ///   - guardrailIdentifier: The unique identifier of the guardrail. This can be an ID or the ARN.
    ///   - maxResults: The maximum number of results to return in the response.
    ///   - nextToken: If there are more results than were returned in the response, the response returns a nextToken that you can send in another ListGuardrails request to see the next batch of results.
    ///   - logger: Logger used during operation
    @inlinable
    public func listGuardrails(
        guardrailIdentifier: String? = nil,
        maxResults: Int? = nil,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListGuardrailsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        return try await self.listGuardrails(
            ListGuardrailsRequest(guardrailIdentifier: guardrailIdentifier, maxResults: maxResults, nextToken: nextToken),
            logger: logger
        )
    }

    /// Returns a list of models you've imported. You can filter the results to return based on one or more criteria. For more information, see Import a customized model in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListImportedModelsResponse``
    @Sendable
    @inlinable
    public func listImportedModels(_ input: ListImportedModelsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListImportedModelsResponse {
        // Forward the request through the shared AWSClient request pipeline.
        return try await self.client.execute(
            operation: "ListImportedModels", path: "/imported-models", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Returns a list of models you've imported. You can filter the results to return based on one or more criteria. For more information, see Import a customized model in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - creationTimeAfter: Return imported models that were created after the specified time.
    ///   - creationTimeBefore: Return imported models that were created before the specified time.
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - nameContains: Return imported models only if the model name contains these characters.
    ///   - nextToken: If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.
    ///   - sortBy: The field to sort by in the returned list of imported models.
    ///   - sortOrder: Specifies whether to sort the results in ascending or descending order.
    ///   - logger: Logger used during operation
    @inlinable
    public func listImportedModels(
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        nameContains: String? = nil,
        nextToken: String? = nil,
        sortBy: SortModelsBy? = nil,
        sortOrder: SortOrder? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListImportedModelsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        return try await self.listImportedModels(
            ListImportedModelsRequest(
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                maxResults: maxResults,
                nameContains: nameContains,
                nextToken: nextToken,
                sortBy: sortBy,
                sortOrder: sortOrder
            ),
            logger: logger
        )
    }

    /// Returns a list of inference profiles that you can use. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListInferenceProfilesResponse``
    @Sendable
    @inlinable
    public func listInferenceProfiles(_ input: ListInferenceProfilesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListInferenceProfilesResponse {
        // Forward the request through the shared AWSClient request pipeline.
        return try await self.client.execute(
            operation: "ListInferenceProfiles", path: "/inference-profiles", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Returns a list of inference profiles that you can use. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - nextToken: If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.
    ///   - typeEquals: Filters for inference profiles that match the type you specify.    SYSTEM_DEFINED – The inference profile is defined by Amazon Bedrock. You can route inference requests across regions with these inference profiles.    APPLICATION – The inference profile was created by a user. This type of inference profile can track metrics and costs when invoking the model in it. The inference profile may route requests to one or multiple regions.
    ///   - logger: Logger used during operation
    @inlinable
    public func listInferenceProfiles(
        maxResults: Int? = nil,
        nextToken: String? = nil,
        typeEquals: InferenceProfileType? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListInferenceProfilesResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        return try await self.listInferenceProfiles(
            ListInferenceProfilesRequest(maxResults: maxResults, nextToken: nextToken, typeEquals: typeEquals),
            logger: logger
        )
    }

    /// Lists the endpoints for models from Amazon Bedrock Marketplace in your Amazon Web Services account.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListMarketplaceModelEndpointsResponse``
    @Sendable
    @inlinable
    public func listMarketplaceModelEndpoints(_ input: ListMarketplaceModelEndpointsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListMarketplaceModelEndpointsResponse {
        // Forward the request through the shared AWSClient request pipeline.
        return try await self.client.execute(
            operation: "ListMarketplaceModelEndpoints", path: "/marketplace-model/endpoints", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Lists the endpoints for models from Amazon Bedrock Marketplace in your Amazon Web Services account.
    ///
    /// Parameters:
    ///   - maxResults: The maximum number of results to return in a single call. If more results are available, the operation returns a NextToken value.
    ///   - modelSourceEquals: If specified, only endpoints for the given model source identifier are returned.
    ///   - nextToken: The token for the next set of results. You receive this token from a previous ListMarketplaceModelEndpoints call.
    ///   - logger: Logger used during operation
    @inlinable
    public func listMarketplaceModelEndpoints(
        maxResults: Int? = nil,
        modelSourceEquals: String? = nil,
        nextToken: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListMarketplaceModelEndpointsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        return try await self.listMarketplaceModelEndpoints(
            ListMarketplaceModelEndpointsRequest(maxResults: maxResults, modelSourceEquals: modelSourceEquals, nextToken: nextToken),
            logger: logger
        )
    }

    /// Returns a list of model copy jobs that you have submitted. You can filter the jobs to return based on one or more criteria. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListModelCopyJobsResponse``
    @Sendable
    @inlinable
    public func listModelCopyJobs(_ input: ListModelCopyJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListModelCopyJobsResponse {
        // Forward the request through the shared AWSClient request pipeline.
        return try await self.client.execute(
            operation: "ListModelCopyJobs", path: "/model-copy-jobs", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Returns a list of model copy jobs that you have submitted. You can filter the jobs to return based on one or more criteria. For more information, see Copy models to be used in other regions in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - creationTimeAfter: Filters for model copy jobs created after the specified time.
    ///   - creationTimeBefore: Filters for model copy jobs created before the specified time.
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - nextToken: If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.
    ///   - sortBy: The field to sort by in the returned list of model copy jobs.
    ///   - sortOrder: Specifies whether to sort the results in ascending or descending order.
    ///   - sourceAccountEquals: Filters for model copy jobs in which the account that the source model belongs to is equal to the value that you specify.
    ///   - sourceModelArnEquals: Filters for model copy jobs in which the Amazon Resource Name (ARN) of the source model is equal to the value that you specify.
    ///   - statusEquals: Filters for model copy jobs whose status matches the value that you specify.
    ///   - targetModelNameContains: Filters for model copy jobs in which the name of the copied model contains the string that you specify.
    ///   - logger: Logger used during operation
    @inlinable
    public func listModelCopyJobs(
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        nextToken: String? = nil,
        sortBy: SortJobsBy? = nil,
        sortOrder: SortOrder? = nil,
        sourceAccountEquals: String? = nil,
        sourceModelArnEquals: String? = nil,
        statusEquals: ModelCopyJobStatus? = nil,
        targetModelNameContains: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListModelCopyJobsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        return try await self.listModelCopyJobs(
            ListModelCopyJobsRequest(
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                maxResults: maxResults,
                nextToken: nextToken,
                sortBy: sortBy,
                sortOrder: sortOrder,
                sourceAccountEquals: sourceAccountEquals,
                sourceModelArnEquals: sourceModelArnEquals,
                statusEquals: statusEquals,
                targetModelNameContains: targetModelNameContains
            ),
            logger: logger
        )
    }

    /// Returns a list of model customization jobs that you have submitted. You can filter the jobs to return based on one or more criteria. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListModelCustomizationJobsResponse``
    @Sendable
    @inlinable
    public func listModelCustomizationJobs(_ input: ListModelCustomizationJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListModelCustomizationJobsResponse {
        // Forward the request through the shared AWSClient request pipeline.
        return try await self.client.execute(
            operation: "ListModelCustomizationJobs", path: "/model-customization-jobs", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Returns a list of model customization jobs that you have submitted. You can filter the jobs to return based on one or more criteria. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - creationTimeAfter: Return customization jobs created after the specified time.
    ///   - creationTimeBefore: Return customization jobs created before the specified time.
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - nameContains: Return customization jobs only if the job name contains these characters.
    ///   - nextToken: If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.
    ///   - sortBy: The field to sort by in the returned list of jobs.
    ///   - sortOrder: The sort order of the results.
    ///   - statusEquals: Return customization jobs with the specified status.
    ///   - logger: Logger used during operation
    @inlinable
    public func listModelCustomizationJobs(
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        nameContains: String? = nil,
        nextToken: String? = nil,
        sortBy: SortJobsBy? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: FineTuningJobStatus? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListModelCustomizationJobsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        return try await self.listModelCustomizationJobs(
            ListModelCustomizationJobsRequest(
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                maxResults: maxResults,
                nameContains: nameContains,
                nextToken: nextToken,
                sortBy: sortBy,
                sortOrder: sortOrder,
                statusEquals: statusEquals
            ),
            logger: logger
        )
    }

    /// Returns a list of import jobs you've submitted. You can filter the results to return based on one or more criteria. For more information, see Import a customized model in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListModelImportJobsResponse``
    @Sendable
    @inlinable
    public func listModelImportJobs(_ input: ListModelImportJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListModelImportJobsResponse {
        // Forward the request through the shared AWSClient request pipeline.
        return try await self.client.execute(
            operation: "ListModelImportJobs", path: "/model-import-jobs", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Returns a list of import jobs you've submitted. You can filter the results to return based on one or more criteria. For more information, see Import a customized model in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - creationTimeAfter: Return import jobs that were created after the specified time.
    ///   - creationTimeBefore: Return import jobs that were created before the specified time.
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - nameContains: Return imported jobs only if the job name contains these characters.
    ///   - nextToken: If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.
    ///   - sortBy: The field to sort by in the returned list of imported jobs.
    ///   - sortOrder: Specifies whether to sort the results in ascending or descending order.
    ///   - statusEquals: Return imported jobs with the specified status.
    ///   - logger: Logger used during operation
    @inlinable
    public func listModelImportJobs(
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        nameContains: String? = nil,
        nextToken: String? = nil,
        sortBy: SortJobsBy? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: ModelImportJobStatus? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) async throws -> ListModelImportJobsResponse {
        // Assemble the request value inline and delegate to the request-based overload.
        return try await self.listModelImportJobs(
            ListModelImportJobsRequest(
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                maxResults: maxResults,
                nameContains: nameContains,
                nextToken: nextToken,
                sortBy: sortBy,
                sortOrder: sortOrder,
                statusEquals: statusEquals
            ),
            logger: logger
        )
    }

    /// Lists all batch inference jobs in the account. For more information, see View details about a batch inference job.
    ///
    /// - Parameters:
    ///   - input: The request object for the operation
    ///   - logger: Logger used during operation
    /// - Returns: The deserialized ``ListModelInvocationJobsResponse``
    @Sendable
    @inlinable
    public func listModelInvocationJobs(_ input: ListModelInvocationJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListModelInvocationJobsResponse {
        // Forward the request through the shared AWSClient request pipeline.
        return try await self.client.execute(
            operation: "ListModelInvocationJobs", path: "/model-invocation-jobs", httpMethod: .GET,
            serviceConfig: self.config, input: input, logger: logger
        )
    }
    /// Lists all batch inference jobs in the account. For more information, see View details about a batch inference job.
    ///
    /// Parameters:
    ///   - maxResults: The maximum number of results to return. If there are more results than the number that you specify, a nextToken value is returned. Use the nextToken in a request to return the next batch of results.
    ///   - nameContains: Specify a string to filter for batch inference jobs whose names contain the string.
    ///   - nextToken: If there were more results than the value you specified in the maxResults field in a previous ListModelInvocationJobs request, the response would have returned a nextToken value. To see the next batch of results, send the nextToken value in another request.
    ///   - sortBy: An attribute by which to sort the results.
    ///   - sortOrder: Specifies whether to sort the results by ascending or descending order.
    ///   - statusEquals: Specify a status to filter for batch inference jobs whose statuses match the string you specify. The following statuses are possible:   Submitted – This job has been submitted to a queue for validation.   Validating – This job is being validated for the requirements described in Format and upload your batch inference data. The criteria include the following:   Your IAM service role has access to the Amazon S3 buckets containing your files.   Your files are .jsonl files and each individual record is a JSON object in the correct format. Note that validation doesn't check if the modelInput value matches the request body for the model.   Your files fulfill the requirements for file size and number of records. For more information, see Quotas for Amazon Bedrock.     Scheduled – This job has been validated and is now in a queue. The job will automatically start when it reaches its turn.   Expired – This job timed out because it was scheduled but didn't begin before the set timeout duration. Submit a new job request.   InProgress – This job has begun. You can start viewing the results in the output S3 location.   Completed – This job has successfully completed. View the output files in the output S3 location.   PartiallyCompleted – This job has partially completed. Not all of your records could be processed in time. View the output files in the output S3 location.   Failed – This job has failed. Check the failure message for any further details. For further assistance, reach out to the Amazon Web ServicesSupport Center.   Stopped – This job was stopped by a user.   Stopping – This job is being stopped by a user.
    ///   - submitTimeAfter: Specify a time to filter for batch inference jobs that were submitted after the time you specify.
    ///   - submitTimeBefore: Specify a time to filter for batch inference jobs that were submitted before the time you specify.
    ///   - logger: Logger used during operation
    @inlinable
    public func listModelInvocationJobs(
        maxResults: Int? = nil,
        nameContains: String? = nil,
        nextToken: String? = nil,
        sortBy: SortJobsBy? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: ModelInvocationJobStatus? = nil,
        submitTimeAfter: Date? = nil,
        submitTimeBefore: Date? = nil,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> ListModelInvocationJobsResponse {
        // Convenience overload: build the request structure from the individual
        // parameters and delegate to the request-based overload of this operation.
        let input = ListModelInvocationJobsRequest(
            maxResults: maxResults, 
            nameContains: nameContains, 
            nextToken: nextToken, 
            sortBy: sortBy, 
            sortOrder: sortOrder, 
            statusEquals: statusEquals, 
            submitTimeAfter: submitTimeAfter, 
            submitTimeBefore: submitTimeBefore
        )
        return try await self.listModelInvocationJobs(input, logger: logger)
    }

    /// Retrieves a list of prompt routers.
    ///
    /// - Parameters:
    ///   - input: The request structure for the ListPromptRouters operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listPromptRouters(_ input: ListPromptRoutersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListPromptRoutersResponse {
        try await self.client.execute(
            operation: "ListPromptRouters", 
            path: "/prompt-routers", 
            httpMethod: .GET, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Retrieves a list of prompt routers.
    ///
    /// Parameters:
    ///   - maxResults: The maximum number of prompt routers to return in one page of results.
    ///   - nextToken: Specify the pagination token from a previous request to retrieve the next page of results.
    ///   - type: The type of the prompt routers, such as whether it's default or custom.
    ///   - logger: Logger used during operation
    @inlinable
    public func listPromptRouters(
        maxResults: Int? = nil,
        nextToken: String? = nil,
        type: PromptRouterType? = nil,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> ListPromptRoutersResponse {
        let input = ListPromptRoutersRequest(
            maxResults: maxResults, 
            nextToken: nextToken, 
            type: type
        )
        return try await self.listPromptRouters(input, logger: logger)
    }

    /// Lists the Provisioned Throughputs in the account. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request structure for the ListProvisionedModelThroughputs operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listProvisionedModelThroughputs(_ input: ListProvisionedModelThroughputsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListProvisionedModelThroughputsResponse {
        try await self.client.execute(
            operation: "ListProvisionedModelThroughputs", 
            path: "/provisioned-model-throughputs", 
            httpMethod: .GET, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Lists the Provisioned Throughputs in the account. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - creationTimeAfter: A filter that returns Provisioned Throughputs created after the specified time.
    ///   - creationTimeBefore: A filter that returns Provisioned Throughputs created before the specified time.
    ///   - maxResults: The maximum number of results to return in the response. If there are more results than the number you specified, the response returns a nextToken value. To see the next batch of results, send the nextToken value in another list request.
    ///   - modelArnEquals: A filter that returns Provisioned Throughputs whose model Amazon Resource Name (ARN) is equal to the value that you specify.
    ///   - nameContains: A filter that returns Provisioned Throughputs if their name contains the expression that you specify.
    ///   - nextToken: If there are more results than the number you specified in the maxResults field, the response returns a nextToken value. To see the next batch of results, specify the nextToken value in this field.
    ///   - sortBy: The field by which to sort the returned list of Provisioned Throughputs.
    ///   - sortOrder: The sort order of the results.
    ///   - statusEquals: A filter that returns Provisioned Throughputs if their status matches the value that you specify.
    ///   - logger: Logger used during operation
    @inlinable
    public func listProvisionedModelThroughputs(
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        modelArnEquals: String? = nil,
        nameContains: String? = nil,
        nextToken: String? = nil,
        sortBy: SortByProvisionedModels? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: ProvisionedModelStatus? = nil,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> ListProvisionedModelThroughputsResponse {
        let input = ListProvisionedModelThroughputsRequest(
            creationTimeAfter: creationTimeAfter, 
            creationTimeBefore: creationTimeBefore, 
            maxResults: maxResults, 
            modelArnEquals: modelArnEquals, 
            nameContains: nameContains, 
            nextToken: nextToken, 
            sortBy: sortBy, 
            sortOrder: sortOrder, 
            statusEquals: statusEquals
        )
        return try await self.listProvisionedModelThroughputs(input, logger: logger)
    }

    /// List the tags associated with the specified resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request structure for the ListTagsForResource operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse {
        try await self.client.execute(
            operation: "ListTagsForResource", 
            path: "/listTagsForResource", 
            httpMethod: .POST, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// List the tags associated with the specified resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - resourceARN: The Amazon Resource Name (ARN) of the resource.
    ///   - logger: Logger used during operation
    @inlinable
    public func listTagsForResource(
        resourceARN: String,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> ListTagsForResourceResponse {
        let input = ListTagsForResourceRequest(
            resourceARN: resourceARN
        )
        return try await self.listTagsForResource(input, logger: logger)
    }

    /// Set the configuration values for model invocation logging.
    ///
    /// - Parameters:
    ///   - input: The request structure for the PutModelInvocationLoggingConfiguration operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func putModelInvocationLoggingConfiguration(_ input: PutModelInvocationLoggingConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutModelInvocationLoggingConfigurationResponse {
        try await self.client.execute(
            operation: "PutModelInvocationLoggingConfiguration", 
            path: "/logging/modelinvocations", 
            httpMethod: .PUT, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Set the configuration values for model invocation logging.
    ///
    /// Parameters:
    ///   - loggingConfig: The logging configuration values to set.
    ///   - logger: Logger used during operation
    @inlinable
    public func putModelInvocationLoggingConfiguration(
        loggingConfig: LoggingConfig,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> PutModelInvocationLoggingConfigurationResponse {
        let input = PutModelInvocationLoggingConfigurationRequest(
            loggingConfig: loggingConfig
        )
        return try await self.putModelInvocationLoggingConfiguration(input, logger: logger)
    }

    /// Put use case for model access.
    ///
    /// - Parameters:
    ///   - input: The request structure for the PutUseCaseForModelAccess operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func putUseCaseForModelAccess(_ input: PutUseCaseForModelAccessRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutUseCaseForModelAccessResponse {
        try await self.client.execute(
            operation: "PutUseCaseForModelAccess", 
            path: "/use-case-for-model-access", 
            httpMethod: .POST, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Put use case for model access.
    ///
    /// Parameters:
    ///   - formData: Put customer profile Request.
    ///   - logger: Logger used during operation
    @inlinable
    public func putUseCaseForModelAccess(
        formData: AWSBase64Data,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> PutUseCaseForModelAccessResponse {
        let input = PutUseCaseForModelAccessRequest(
            formData: formData
        )
        return try await self.putUseCaseForModelAccess(input, logger: logger)
    }

    /// Registers an existing Amazon SageMaker endpoint with Amazon Bedrock Marketplace, allowing it to be used with Amazon Bedrock APIs.
    ///
    /// - Parameters:
    ///   - input: The request structure for the RegisterMarketplaceModelEndpoint operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func registerMarketplaceModelEndpoint(_ input: RegisterMarketplaceModelEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterMarketplaceModelEndpointResponse {
        try await self.client.execute(
            operation: "RegisterMarketplaceModelEndpoint", 
            path: "/marketplace-model/endpoints/{endpointIdentifier}/registration", 
            httpMethod: .POST, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Registers an existing Amazon SageMaker endpoint with Amazon Bedrock Marketplace, allowing it to be used with Amazon Bedrock APIs.
    ///
    /// Parameters:
    ///   - endpointIdentifier: The ARN of the Amazon SageMaker endpoint you want to register with Amazon Bedrock Marketplace.
    ///   - modelSourceIdentifier: The ARN of the model from Amazon Bedrock Marketplace that is deployed on the endpoint.
    ///   - logger: Logger used during operation
    @inlinable
    public func registerMarketplaceModelEndpoint(
        endpointIdentifier: String,
        modelSourceIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> RegisterMarketplaceModelEndpointResponse {
        let input = RegisterMarketplaceModelEndpointRequest(
            endpointIdentifier: endpointIdentifier, 
            modelSourceIdentifier: modelSourceIdentifier
        )
        return try await self.registerMarketplaceModelEndpoint(input, logger: logger)
    }

    /// Stops an evaluation job that is currently being created or running.
    ///
    /// - Parameters:
    ///   - input: The request structure for the StopEvaluationJob operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func stopEvaluationJob(_ input: StopEvaluationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopEvaluationJobResponse {
        try await self.client.execute(
            operation: "StopEvaluationJob", 
            path: "/evaluation-job/{jobIdentifier}/stop", 
            httpMethod: .POST, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Stops an evaluation job that is currently being created or running.
    ///
    /// Parameters:
    ///   - jobIdentifier: The Amazon Resource Name (ARN) of the evaluation job you want to stop.
    ///   - logger: Logger used during operation
    @inlinable
    public func stopEvaluationJob(
        jobIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> StopEvaluationJobResponse {
        let input = StopEvaluationJobRequest(
            jobIdentifier: jobIdentifier
        )
        return try await self.stopEvaluationJob(input, logger: logger)
    }

    /// Stops an active model customization job. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request structure for the StopModelCustomizationJob operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func stopModelCustomizationJob(_ input: StopModelCustomizationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopModelCustomizationJobResponse {
        try await self.client.execute(
            operation: "StopModelCustomizationJob", 
            path: "/model-customization-jobs/{jobIdentifier}/stop", 
            httpMethod: .POST, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Stops an active model customization job. For more information, see Custom models in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - jobIdentifier: Job identifier of the job to stop.
    ///   - logger: Logger used during operation
    @inlinable
    public func stopModelCustomizationJob(
        jobIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> StopModelCustomizationJobResponse {
        let input = StopModelCustomizationJobRequest(
            jobIdentifier: jobIdentifier
        )
        return try await self.stopModelCustomizationJob(input, logger: logger)
    }

    /// Stops a batch inference job. You're only charged for tokens that were already processed. For more information, see Stop a batch inference job.
    ///
    /// - Parameters:
    ///   - input: The request structure for the StopModelInvocationJob operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func stopModelInvocationJob(_ input: StopModelInvocationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopModelInvocationJobResponse {
        try await self.client.execute(
            operation: "StopModelInvocationJob", 
            path: "/model-invocation-job/{jobIdentifier}/stop", 
            httpMethod: .POST, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Stops a batch inference job. You're only charged for tokens that were already processed. For more information, see Stop a batch inference job.
    ///
    /// Parameters:
    ///   - jobIdentifier: The Amazon Resource Name (ARN) of the batch inference job to stop.
    ///   - logger: Logger used during operation
    @inlinable
    public func stopModelInvocationJob(
        jobIdentifier: String,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> StopModelInvocationJobResponse {
        let input = StopModelInvocationJobRequest(
            jobIdentifier: jobIdentifier
        )
        return try await self.stopModelInvocationJob(input, logger: logger)
    }

    /// Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request structure for the TagResource operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse {
        try await self.client.execute(
            operation: "TagResource", 
            path: "/tagResource", 
            httpMethod: .POST, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - resourceARN: The Amazon Resource Name (ARN) of the resource to tag.
    ///   - tags: Tags to associate with the resource.
    ///   - logger: Logger used during operation
    @inlinable
    public func tagResource(
        resourceARN: String,
        tags: [Tag],
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> TagResourceResponse {
        let input = TagResourceRequest(
            resourceARN: resourceARN, 
            tags: tags
        )
        return try await self.tagResource(input, logger: logger)
    }

    /// Remove one or more tags from a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request structure for the UntagResource operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse {
        try await self.client.execute(
            operation: "UntagResource", 
            path: "/untagResource", 
            httpMethod: .POST, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Remove one or more tags from a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - resourceARN: The Amazon Resource Name (ARN) of the resource to untag.
    ///   - tagKeys: Tag keys of the tags to remove from the resource.
    ///   - logger: Logger used during operation
    @inlinable
    public func untagResource(
        resourceARN: String,
        tagKeys: [String],
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> UntagResourceResponse {
        let input = UntagResourceRequest(
            resourceARN: resourceARN, 
            tagKeys: tagKeys
        )
        return try await self.untagResource(input, logger: logger)
    }

    /// Updates a guardrail with the values you specify.
    ///
    /// - Specify a name and optional description.
    /// - Specify messages for when the guardrail successfully blocks a prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields.
    /// - Specify topics for the guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the topicsConfig list pertains to one topic: give it a name and description so the guardrail can properly identify the topic, specify DENY in the type field, and (optionally) provide up to five example prompts that belong to the topic in the examples list.
    /// - Specify filter strengths for the harmful categories defined in Amazon Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful category: specify the category in the type field, the filter strength for prompts in inputStrength, and the strength for model responses in strength. For more information, see Content filters and GuardrailContentFilterConfig.
    /// - (Optional) For security, include the ARN of a KMS key in the kmsKeyId field.
    ///
    /// - Parameters:
    ///   - input: The request structure for the UpdateGuardrail operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func updateGuardrail(_ input: UpdateGuardrailRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateGuardrailResponse {
        try await self.client.execute(
            operation: "UpdateGuardrail", 
            path: "/guardrails/{guardrailIdentifier}", 
            httpMethod: .PUT, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Updates a guardrail with the values you specify.
    ///
    /// - Specify a name and optional description.
    /// - Specify messages for when the guardrail successfully blocks a prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields.
    /// - Specify topics for the guardrail to deny in the topicPolicyConfig object. Each GuardrailTopicConfig object in the topicsConfig list pertains to one topic: give it a name and description so the guardrail can properly identify the topic, specify DENY in the type field, and (optionally) provide up to five example prompts that belong to the topic in the examples list.
    /// - Specify filter strengths for the harmful categories defined in Amazon Bedrock in the contentPolicyConfig object. Each GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful category: specify the category in the type field, the filter strength for prompts in inputStrength, and the strength for model responses in strength. For more information, see Content filters and GuardrailContentFilterConfig.
    /// - (Optional) For security, include the ARN of a KMS key in the kmsKeyId field.
    ///
    /// Parameters:
    ///   - blockedInputMessaging: The message to return when the guardrail blocks a prompt.
    ///   - blockedOutputsMessaging: The message to return when the guardrail blocks a model response.
    ///   - contentPolicyConfig: The content policy to configure for the guardrail.
    ///   - contextualGroundingPolicyConfig: The contextual grounding policy configuration used to update a guardrail.
    ///   - crossRegionConfig: The system-defined guardrail profile that you're using with your guardrail. Guardrail profiles define the destination Amazon Web Services Regions where guardrail inference requests can be automatically routed. For more information, see the Amazon Bedrock User Guide.
    ///   - description: A description of the guardrail.
    ///   - guardrailIdentifier: The unique identifier of the guardrail. This can be an ID or the ARN.
    ///   - kmsKeyId: The ARN of the KMS key with which to encrypt the guardrail.
    ///   - name: A name for the guardrail.
    ///   - sensitiveInformationPolicyConfig: The sensitive information policy to configure for the guardrail.
    ///   - topicPolicyConfig: The topic policy to configure for the guardrail.
    ///   - wordPolicyConfig: The word policy to configure for the guardrail.
    ///   - logger: Logger used during operation
    @inlinable
    public func updateGuardrail(
        blockedInputMessaging: String,
        blockedOutputsMessaging: String,
        contentPolicyConfig: GuardrailContentPolicyConfig? = nil,
        contextualGroundingPolicyConfig: GuardrailContextualGroundingPolicyConfig? = nil,
        crossRegionConfig: GuardrailCrossRegionConfig? = nil,
        description: String? = nil,
        guardrailIdentifier: String,
        kmsKeyId: String? = nil,
        name: String,
        sensitiveInformationPolicyConfig: GuardrailSensitiveInformationPolicyConfig? = nil,
        topicPolicyConfig: GuardrailTopicPolicyConfig? = nil,
        wordPolicyConfig: GuardrailWordPolicyConfig? = nil,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> UpdateGuardrailResponse {
        let input = UpdateGuardrailRequest(
            blockedInputMessaging: blockedInputMessaging, 
            blockedOutputsMessaging: blockedOutputsMessaging, 
            contentPolicyConfig: contentPolicyConfig, 
            contextualGroundingPolicyConfig: contextualGroundingPolicyConfig, 
            crossRegionConfig: crossRegionConfig, 
            description: description, 
            guardrailIdentifier: guardrailIdentifier, 
            kmsKeyId: kmsKeyId, 
            name: name, 
            sensitiveInformationPolicyConfig: sensitiveInformationPolicyConfig, 
            topicPolicyConfig: topicPolicyConfig, 
            wordPolicyConfig: wordPolicyConfig
        )
        return try await self.updateGuardrail(input, logger: logger)
    }

    /// Updates the configuration of an existing endpoint for a model from Amazon Bedrock Marketplace.
    ///
    /// - Parameters:
    ///   - input: The request structure for the UpdateMarketplaceModelEndpoint operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func updateMarketplaceModelEndpoint(_ input: UpdateMarketplaceModelEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateMarketplaceModelEndpointResponse {
        try await self.client.execute(
            operation: "UpdateMarketplaceModelEndpoint", 
            path: "/marketplace-model/endpoints/{endpointArn}", 
            httpMethod: .PATCH, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Updates the configuration of an existing endpoint for a model from Amazon Bedrock Marketplace.
    ///
    /// Parameters:
    ///   - clientRequestToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. This token is listed as not required because Amazon Web Services SDKs automatically generate it for you and set this parameter. If you're not using the Amazon Web Services SDK or the CLI, you must provide this token or the action will fail.
    ///   - endpointArn: The Amazon Resource Name (ARN) of the endpoint you want to update.
    ///   - endpointConfig: The new configuration for the endpoint, including the number and type of instances to use.
    ///   - logger: Logger used during operation
    @inlinable
    public func updateMarketplaceModelEndpoint(
        clientRequestToken: String? = UpdateMarketplaceModelEndpointRequest.idempotencyToken(),
        endpointArn: String,
        endpointConfig: EndpointConfig,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> UpdateMarketplaceModelEndpointResponse {
        let input = UpdateMarketplaceModelEndpointRequest(
            clientRequestToken: clientRequestToken, 
            endpointArn: endpointArn, 
            endpointConfig: endpointConfig
        )
        return try await self.updateMarketplaceModelEndpoint(input, logger: logger)
    }

    /// Updates the name or associated model for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.
    ///
    /// - Parameters:
    ///   - input: The request structure for the UpdateProvisionedModelThroughput operation
    ///   - logger: Logger used during operation
    @Sendable
    @inlinable
    public func updateProvisionedModelThroughput(_ input: UpdateProvisionedModelThroughputRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateProvisionedModelThroughputResponse {
        try await self.client.execute(
            operation: "UpdateProvisionedModelThroughput", 
            path: "/provisioned-model-throughput/{provisionedModelId}", 
            httpMethod: .PATCH, 
            serviceConfig: self.config, 
            input: input, 
            logger: logger
        )
    }
    /// Updates the name or associated model for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.
    ///
    /// Parameters:
    ///   - desiredModelId: The Amazon Resource Name (ARN) of the new model to associate with this Provisioned Throughput. You can't specify this field if this Provisioned Throughput is associated with a base model. If this Provisioned Throughput is associated with a custom model, you can specify one of the following options:   The base model from which the custom model was customized.   Another custom model that was customized from the same base model as the custom model.
    ///   - desiredProvisionedModelName: The new name for this Provisioned Throughput.
    ///   - provisionedModelId: The Amazon Resource Name (ARN) or name of the Provisioned Throughput to update.
    ///   - logger: Logger used during operation
    @inlinable
    public func updateProvisionedModelThroughput(
        desiredModelId: String? = nil,
        desiredProvisionedModelName: String? = nil,
        provisionedModelId: String,
        logger: Logger = AWSClient.loggingDisabled        
    ) async throws -> UpdateProvisionedModelThroughputResponse {
        let input = UpdateProvisionedModelThroughputRequest(
            desiredModelId: desiredModelId, 
            desiredProvisionedModelName: desiredProvisionedModelName, 
            provisionedModelId: provisionedModelId
        )
        return try await self.updateProvisionedModelThroughput(input, logger: logger)
    }
}

extension Bedrock {
    /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are no public
    /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead.
    public init(from: Bedrock, patch: AWSServiceConfig.Patch) {
        self.client = from.client
        self.config = from.config.with(patch: patch)
    }
}

// MARK: Paginators

@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *)
extension Bedrock {
    /// Return PaginatorSequence for operation ``listCustomModelDeployments(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listCustomModelDeploymentsPaginator(
        _ input: ListCustomModelDeploymentsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCustomModelDeploymentsRequest, ListCustomModelDeploymentsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listCustomModelDeployments,
            inputKey: \ListCustomModelDeploymentsRequest.nextToken,
            outputKey: \ListCustomModelDeploymentsResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listCustomModelDeployments(_:logger:)``.
    ///
    /// - Parameters:
    ///   - createdAfter: Filters deployments created after the specified date and time.
    ///   - createdBefore: Filters deployments created before the specified date and time.
    ///   - maxResults: The maximum number of results to return in a single call.
    ///   - modelArnEquals: Filters deployments by the Amazon Resource Name (ARN) of the associated custom model.
    ///   - nameContains: Filters deployments whose names contain the specified string.
    ///   - sortBy: The field to sort the results by. The only supported value is CreationTime.
    ///   - sortOrder: The sort order for the results. Valid values are Ascending and Descending. Default is Descending.
    ///   - statusEquals: Filters deployments by status. Valid values are CREATING, ACTIVE, and FAILED.
    ///   - logger: Logger used for logging
    @inlinable
    public func listCustomModelDeploymentsPaginator(
        createdAfter: Date? = nil,
        createdBefore: Date? = nil,
        maxResults: Int? = nil,
        modelArnEquals: String? = nil,
        nameContains: String? = nil,
        sortBy: SortModelsBy? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: CustomModelDeploymentStatus? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCustomModelDeploymentsRequest, ListCustomModelDeploymentsResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listCustomModelDeploymentsPaginator(
            ListCustomModelDeploymentsRequest(
                createdAfter: createdAfter,
                createdBefore: createdBefore,
                maxResults: maxResults,
                modelArnEquals: modelArnEquals,
                nameContains: nameContains,
                sortBy: sortBy,
                sortOrder: sortOrder,
                statusEquals: statusEquals
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listCustomModels(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listCustomModelsPaginator(
        _ input: ListCustomModelsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCustomModelsRequest, ListCustomModelsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listCustomModels,
            inputKey: \ListCustomModelsRequest.nextToken,
            outputKey: \ListCustomModelsResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listCustomModels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - baseModelArnEquals: Return custom models only if the base model Amazon Resource Name (ARN) matches this parameter.
    ///   - creationTimeAfter: Return custom models created after the specified time.
    ///   - creationTimeBefore: Return custom models created before the specified time.
    ///   - foundationModelArnEquals: Return custom models only if the foundation model Amazon Resource Name (ARN) matches this parameter.
    ///   - isOwned: Return custom models depending on if the current account owns them (true) or if they were shared with the current account (false).
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - modelStatus: The status of the model to filter results by. Possible values include:    Creating - Include only models that are currently being created and validated.    Active - Include only models that have been successfully created and are ready for use.    Failed - Include only models where the creation process failed.   If you don't specify a status, the API returns models in all states.
    ///   - nameContains: Return custom models only if the job name contains these characters.
    ///   - sortBy: The field to sort by in the returned list of models.
    ///   - sortOrder: The sort order of the results.
    ///   - logger: Logger used for logging
    @inlinable
    public func listCustomModelsPaginator(
        baseModelArnEquals: String? = nil,
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        foundationModelArnEquals: String? = nil,
        isOwned: Bool? = nil,
        maxResults: Int? = nil,
        modelStatus: ModelStatus? = nil,
        nameContains: String? = nil,
        sortBy: SortModelsBy? = nil,
        sortOrder: SortOrder? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListCustomModelsRequest, ListCustomModelsResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listCustomModelsPaginator(
            ListCustomModelsRequest(
                baseModelArnEquals: baseModelArnEquals,
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                foundationModelArnEquals: foundationModelArnEquals,
                isOwned: isOwned,
                maxResults: maxResults,
                modelStatus: modelStatus,
                nameContains: nameContains,
                sortBy: sortBy,
                sortOrder: sortOrder
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listEvaluationJobs(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listEvaluationJobsPaginator(
        _ input: ListEvaluationJobsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListEvaluationJobsRequest, ListEvaluationJobsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listEvaluationJobs,
            inputKey: \ListEvaluationJobsRequest.nextToken,
            outputKey: \ListEvaluationJobsResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listEvaluationJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - applicationTypeEquals: A filter to only list evaluation jobs that are either model evaluations or knowledge base evaluations.
    ///   - creationTimeAfter: A filter to only list evaluation jobs created after a specified time.
    ///   - creationTimeBefore: A filter to only list evaluation jobs created before a specified time.
    ///   - maxResults: The maximum number of results to return.
    ///   - nameContains: A filter to only list evaluation jobs that contain a specified string in the job name.
    ///   - sortBy: Specifies a creation time to sort the list of evaluation jobs by when they were created.
    ///   - sortOrder: Specifies whether to sort the list of evaluation jobs by either ascending or descending order.
    ///   - statusEquals: A filter to only list evaluation jobs that are of a certain status.
    ///   - logger: Logger used for logging
    @inlinable
    public func listEvaluationJobsPaginator(
        applicationTypeEquals: ApplicationType? = nil,
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        nameContains: String? = nil,
        sortBy: SortJobsBy? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: EvaluationJobStatus? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListEvaluationJobsRequest, ListEvaluationJobsResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listEvaluationJobsPaginator(
            ListEvaluationJobsRequest(
                applicationTypeEquals: applicationTypeEquals,
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                maxResults: maxResults,
                nameContains: nameContains,
                sortBy: sortBy,
                sortOrder: sortOrder,
                statusEquals: statusEquals
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listGuardrails(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listGuardrailsPaginator(
        _ input: ListGuardrailsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListGuardrailsRequest, ListGuardrailsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listGuardrails,
            inputKey: \ListGuardrailsRequest.nextToken,
            outputKey: \ListGuardrailsResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listGuardrails(_:logger:)``.
    ///
    /// - Parameters:
    ///   - guardrailIdentifier: The unique identifier of the guardrail. This can be an ID or the ARN.
    ///   - maxResults: The maximum number of results to return in the response.
    ///   - logger: Logger used for logging
    @inlinable
    public func listGuardrailsPaginator(
        guardrailIdentifier: String? = nil,
        maxResults: Int? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListGuardrailsRequest, ListGuardrailsResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listGuardrailsPaginator(
            ListGuardrailsRequest(
                guardrailIdentifier: guardrailIdentifier,
                maxResults: maxResults
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listImportedModels(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listImportedModelsPaginator(
        _ input: ListImportedModelsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListImportedModelsRequest, ListImportedModelsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listImportedModels,
            inputKey: \ListImportedModelsRequest.nextToken,
            outputKey: \ListImportedModelsResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listImportedModels(_:logger:)``.
    ///
    /// - Parameters:
    ///   - creationTimeAfter: Return imported models that were created after the specified time.
    ///   - creationTimeBefore: Return imported models that were created before the specified time.
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - nameContains: Return imported models only if the model name contains these characters.
    ///   - sortBy: The field to sort by in the returned list of imported models.
    ///   - sortOrder: Specifies whether to sort the results in ascending or descending order.
    ///   - logger: Logger used for logging
    @inlinable
    public func listImportedModelsPaginator(
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        nameContains: String? = nil,
        sortBy: SortModelsBy? = nil,
        sortOrder: SortOrder? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListImportedModelsRequest, ListImportedModelsResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listImportedModelsPaginator(
            ListImportedModelsRequest(
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                maxResults: maxResults,
                nameContains: nameContains,
                sortBy: sortBy,
                sortOrder: sortOrder
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listInferenceProfiles(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listInferenceProfilesPaginator(
        _ input: ListInferenceProfilesRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListInferenceProfilesRequest, ListInferenceProfilesResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listInferenceProfiles,
            inputKey: \ListInferenceProfilesRequest.nextToken,
            outputKey: \ListInferenceProfilesResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listInferenceProfiles(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - typeEquals: Filters for inference profiles that match the type you specify.    SYSTEM_DEFINED – The inference profile is defined by Amazon Bedrock. You can route inference requests across regions with these inference profiles.    APPLICATION – The inference profile was created by a user. This type of inference profile can track metrics and costs when invoking the model in it. The inference profile may route requests to one or multiple regions.
    ///   - logger: Logger used for logging
    @inlinable
    public func listInferenceProfilesPaginator(
        maxResults: Int? = nil,
        typeEquals: InferenceProfileType? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListInferenceProfilesRequest, ListInferenceProfilesResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listInferenceProfilesPaginator(
            ListInferenceProfilesRequest(
                maxResults: maxResults,
                typeEquals: typeEquals
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listMarketplaceModelEndpoints(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listMarketplaceModelEndpointsPaginator(
        _ input: ListMarketplaceModelEndpointsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListMarketplaceModelEndpointsRequest, ListMarketplaceModelEndpointsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listMarketplaceModelEndpoints,
            inputKey: \ListMarketplaceModelEndpointsRequest.nextToken,
            outputKey: \ListMarketplaceModelEndpointsResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listMarketplaceModelEndpoints(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum number of results to return in a single call. If more results are available, the operation returns a NextToken value.
    ///   - modelSourceEquals: If specified, only endpoints for the given model source identifier are returned.
    ///   - logger: Logger used for logging
    @inlinable
    public func listMarketplaceModelEndpointsPaginator(
        maxResults: Int? = nil,
        modelSourceEquals: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListMarketplaceModelEndpointsRequest, ListMarketplaceModelEndpointsResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listMarketplaceModelEndpointsPaginator(
            ListMarketplaceModelEndpointsRequest(
                maxResults: maxResults,
                modelSourceEquals: modelSourceEquals
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listModelCopyJobs(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listModelCopyJobsPaginator(
        _ input: ListModelCopyJobsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListModelCopyJobsRequest, ListModelCopyJobsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listModelCopyJobs,
            inputKey: \ListModelCopyJobsRequest.nextToken,
            outputKey: \ListModelCopyJobsResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listModelCopyJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - creationTimeAfter: Filters for model copy jobs created after the specified time.
    ///   - creationTimeBefore: Filters for model copy jobs created before the specified time.
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - sortBy: The field to sort by in the returned list of model copy jobs.
    ///   - sortOrder: Specifies whether to sort the results in ascending or descending order.
    ///   - sourceAccountEquals: Filters for model copy jobs in which the account that the source model belongs to is equal to the value that you specify.
    ///   - sourceModelArnEquals: Filters for model copy jobs in which the Amazon Resource Name (ARN) of the source model is equal to the value that you specify.
    ///   - statusEquals: Filters for model copy jobs whose status matches the value that you specify.
    ///   - targetModelNameContains: Filters for model copy jobs in which the name of the copied model contains the string that you specify.
    ///   - logger: Logger used for logging
    @inlinable
    public func listModelCopyJobsPaginator(
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        sortBy: SortJobsBy? = nil,
        sortOrder: SortOrder? = nil,
        sourceAccountEquals: String? = nil,
        sourceModelArnEquals: String? = nil,
        statusEquals: ModelCopyJobStatus? = nil,
        targetModelNameContains: String? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListModelCopyJobsRequest, ListModelCopyJobsResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listModelCopyJobsPaginator(
            ListModelCopyJobsRequest(
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                maxResults: maxResults,
                sortBy: sortBy,
                sortOrder: sortOrder,
                sourceAccountEquals: sourceAccountEquals,
                sourceModelArnEquals: sourceModelArnEquals,
                statusEquals: statusEquals,
                targetModelNameContains: targetModelNameContains
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listModelCustomizationJobs(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listModelCustomizationJobsPaginator(
        _ input: ListModelCustomizationJobsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListModelCustomizationJobsRequest, ListModelCustomizationJobsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listModelCustomizationJobs,
            inputKey: \ListModelCustomizationJobsRequest.nextToken,
            outputKey: \ListModelCustomizationJobsResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listModelCustomizationJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - creationTimeAfter: Return customization jobs created after the specified time.
    ///   - creationTimeBefore: Return customization jobs created before the specified time.
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - nameContains: Return customization jobs only if the job name contains these characters.
    ///   - sortBy: The field to sort by in the returned list of jobs.
    ///   - sortOrder: The sort order of the results.
    ///   - statusEquals: Return customization jobs with the specified status.
    ///   - logger: Logger used for logging
    @inlinable
    public func listModelCustomizationJobsPaginator(
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        nameContains: String? = nil,
        sortBy: SortJobsBy? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: FineTuningJobStatus? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListModelCustomizationJobsRequest, ListModelCustomizationJobsResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listModelCustomizationJobsPaginator(
            ListModelCustomizationJobsRequest(
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                maxResults: maxResults,
                nameContains: nameContains,
                sortBy: sortBy,
                sortOrder: sortOrder,
                statusEquals: statusEquals
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listModelImportJobs(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listModelImportJobsPaginator(
        _ input: ListModelImportJobsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListModelImportJobsRequest, ListModelImportJobsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listModelImportJobs,
            inputKey: \ListModelImportJobsRequest.nextToken,
            outputKey: \ListModelImportJobsResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listModelImportJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - creationTimeAfter: Return import jobs that were created after the specified time.
    ///   - creationTimeBefore: Return import jobs that were created before the specified time.
    ///   - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results.
    ///   - nameContains: Return imported jobs only if the job name contains these characters.
    ///   - sortBy: The field to sort by in the returned list of imported jobs.
    ///   - sortOrder: Specifies whether to sort the results in ascending or descending order.
    ///   - statusEquals: Return imported jobs with the specified status.
    ///   - logger: Logger used for logging
    @inlinable
    public func listModelImportJobsPaginator(
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        nameContains: String? = nil,
        sortBy: SortJobsBy? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: ModelImportJobStatus? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListModelImportJobsRequest, ListModelImportJobsResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listModelImportJobsPaginator(
            ListModelImportJobsRequest(
                creationTimeAfter: creationTimeAfter,
                creationTimeBefore: creationTimeBefore,
                maxResults: maxResults,
                nameContains: nameContains,
                sortBy: sortBy,
                sortOrder: sortOrder,
                statusEquals: statusEquals
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listModelInvocationJobs(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listModelInvocationJobsPaginator(
        _ input: ListModelInvocationJobsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListModelInvocationJobsRequest, ListModelInvocationJobsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listModelInvocationJobs,
            inputKey: \ListModelInvocationJobsRequest.nextToken,
            outputKey: \ListModelInvocationJobsResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listModelInvocationJobs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum number of results to return. If there are more results than the number that you specify, a nextToken value is returned. Use the nextToken in a request to return the next batch of results.
    ///   - nameContains: Specify a string to filter for batch inference jobs whose names contain the string.
    ///   - sortBy: An attribute by which to sort the results.
    ///   - sortOrder: Specifies whether to sort the results by ascending or descending order.
    ///   - statusEquals: Specify a status to filter for batch inference jobs whose statuses match the string you specify. The following statuses are possible:   Submitted – This job has been submitted to a queue for validation.   Validating – This job is being validated for the requirements described in Format and upload your batch inference data. The criteria include the following:   Your IAM service role has access to the Amazon S3 buckets containing your files.   Your files are .jsonl files and each individual record is a JSON object in the correct format. Note that validation doesn't check if the modelInput value matches the request body for the model.   Your files fulfill the requirements for file size and number of records. For more information, see Quotas for Amazon Bedrock.     Scheduled – This job has been validated and is now in a queue. The job will automatically start when it reaches its turn.   Expired – This job timed out because it was scheduled but didn't begin before the set timeout duration. Submit a new job request.   InProgress – This job has begun. You can start viewing the results in the output S3 location.   Completed – This job has successfully completed. View the output files in the output S3 location.   PartiallyCompleted – This job has partially completed. Not all of your records could be processed in time. View the output files in the output S3 location.   Failed – This job has failed. Check the failure message for any further details. For further assistance, reach out to the Amazon Web Services Support Center.   Stopped – This job was stopped by a user.   Stopping – This job is being stopped by a user.
    ///   - submitTimeAfter: Specify a time to filter for batch inference jobs that were submitted after the time you specify.
    ///   - submitTimeBefore: Specify a time to filter for batch inference jobs that were submitted before the time you specify.
    ///   - logger: Logger used for logging
    @inlinable
    public func listModelInvocationJobsPaginator(
        maxResults: Int? = nil,
        nameContains: String? = nil,
        sortBy: SortJobsBy? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: ModelInvocationJobStatus? = nil,
        submitTimeAfter: Date? = nil,
        submitTimeBefore: Date? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListModelInvocationJobsRequest, ListModelInvocationJobsResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listModelInvocationJobsPaginator(
            ListModelInvocationJobsRequest(
                maxResults: maxResults,
                nameContains: nameContains,
                sortBy: sortBy,
                sortOrder: sortOrder,
                statusEquals: statusEquals,
                submitTimeAfter: submitTimeAfter,
                submitTimeBefore: submitTimeBefore
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listPromptRouters(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listPromptRoutersPaginator(
        _ input: ListPromptRoutersRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListPromptRoutersRequest, ListPromptRoutersResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listPromptRouters,
            inputKey: \ListPromptRoutersRequest.nextToken,
            outputKey: \ListPromptRoutersResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listPromptRouters(_:logger:)``.
    ///
    /// - Parameters:
    ///   - maxResults: The maximum number of prompt routers to return in one page of results.
    ///   - type: The type of the prompt routers, such as whether it's default or custom.
    ///   - logger: Logger used for logging
    @inlinable
    public func listPromptRoutersPaginator(
        maxResults: Int? = nil,
        type: PromptRouterType? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListPromptRoutersRequest, ListPromptRoutersResponse> {
        // Construct the request inline and forward to the request-based paginator.
        self.listPromptRoutersPaginator(
            ListPromptRoutersRequest(
                maxResults: maxResults,
                type: type
            ),
            logger: logger
        )
    }

    /// Return PaginatorSequence for operation ``listProvisionedModelThroughputs(_:logger:)``.
    ///
    /// The sequence threads each response's `nextToken` into the following request.
    ///
    /// - Parameters:
    ///   - input: Input for operation
    ///   - logger: Logger used for logging
    @inlinable
    public func listProvisionedModelThroughputsPaginator(
        _ input: ListProvisionedModelThroughputsRequest,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListProvisionedModelThroughputsRequest, ListProvisionedModelThroughputsResponse> {
        AWSClient.PaginatorSequence(
            input: input,
            command: self.listProvisionedModelThroughputs,
            inputKey: \ListProvisionedModelThroughputsRequest.nextToken,
            outputKey: \ListProvisionedModelThroughputsResponse.nextToken,
            logger: logger
        )
    }
    /// Return PaginatorSequence for operation ``listProvisionedModelThroughputs(_:logger:)``.
    ///
    /// - Parameters:
    ///   - creationTimeAfter: A filter that returns Provisioned Throughputs created after the specified time.
    ///   - creationTimeBefore: A filter that returns Provisioned Throughputs created before the specified time.
    ///   - maxResults: The maximum number of results to return in the response. If there are more results than the number you specified, the response returns a nextToken value. To see the next batch of results, send the nextToken value in another list request.
    ///   - modelArnEquals: A filter that returns Provisioned Throughputs whose model Amazon Resource Name (ARN) is equal to the value that you specify.
    ///   - nameContains: A filter that returns Provisioned Throughputs if their name contains the expression that you specify.
    ///   - sortBy: The field by which to sort the returned list of Provisioned Throughputs.
    ///   - sortOrder: The sort order of the results.
    ///   - statusEquals: A filter that returns Provisioned Throughputs if their statuses matches the value that you specify.
    ///   - logger: Logger used for logging
    @inlinable
    public func listProvisionedModelThroughputsPaginator(
        creationTimeAfter: Date? = nil,
        creationTimeBefore: Date? = nil,
        maxResults: Int? = nil,
        modelArnEquals: String? = nil,
        nameContains: String? = nil,
        sortBy: SortByProvisionedModels? = nil,
        sortOrder: SortOrder? = nil,
        statusEquals: ProvisionedModelStatus? = nil,
        logger: Logger = AWSClient.loggingDisabled
    ) -> AWSClient.PaginatorSequence<ListProvisionedModelThroughputsRequest, ListProvisionedModelThroughputsResponse> {
        // Assemble the request from the individual filter values, then delegate
        // to the request-based paginator overload.
        let request = ListProvisionedModelThroughputsRequest(
            creationTimeAfter: creationTimeAfter,
            creationTimeBefore: creationTimeBefore,
            maxResults: maxResults,
            modelArnEquals: modelArnEquals,
            nameContains: nameContains,
            sortBy: sortBy,
            sortOrder: sortOrder,
            statusEquals: statusEquals
        )
        return self.listProvisionedModelThroughputsPaginator(request, logger: logger)
    }
}

extension Bedrock.ListCustomModelDeploymentsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListCustomModelDeploymentsRequest {
        Bedrock.ListCustomModelDeploymentsRequest(
            createdAfter: createdAfter,
            createdBefore: createdBefore,
            maxResults: maxResults,
            modelArnEquals: modelArnEquals,
            nameContains: nameContains,
            nextToken: token,
            sortBy: sortBy,
            sortOrder: sortOrder,
            statusEquals: statusEquals
        )
    }
}

extension Bedrock.ListCustomModelsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListCustomModelsRequest {
        Bedrock.ListCustomModelsRequest(
            baseModelArnEquals: baseModelArnEquals,
            creationTimeAfter: creationTimeAfter,
            creationTimeBefore: creationTimeBefore,
            foundationModelArnEquals: foundationModelArnEquals,
            isOwned: isOwned,
            maxResults: maxResults,
            modelStatus: modelStatus,
            nameContains: nameContains,
            nextToken: token,
            sortBy: sortBy,
            sortOrder: sortOrder
        )
    }
}

extension Bedrock.ListEvaluationJobsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListEvaluationJobsRequest {
        Bedrock.ListEvaluationJobsRequest(
            applicationTypeEquals: applicationTypeEquals,
            creationTimeAfter: creationTimeAfter,
            creationTimeBefore: creationTimeBefore,
            maxResults: maxResults,
            nameContains: nameContains,
            nextToken: token,
            sortBy: sortBy,
            sortOrder: sortOrder,
            statusEquals: statusEquals
        )
    }
}

extension Bedrock.ListGuardrailsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListGuardrailsRequest {
        Bedrock.ListGuardrailsRequest(
            guardrailIdentifier: guardrailIdentifier,
            maxResults: maxResults,
            nextToken: token
        )
    }
}

extension Bedrock.ListImportedModelsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListImportedModelsRequest {
        Bedrock.ListImportedModelsRequest(
            creationTimeAfter: creationTimeAfter,
            creationTimeBefore: creationTimeBefore,
            maxResults: maxResults,
            nameContains: nameContains,
            nextToken: token,
            sortBy: sortBy,
            sortOrder: sortOrder
        )
    }
}

extension Bedrock.ListInferenceProfilesRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListInferenceProfilesRequest {
        Bedrock.ListInferenceProfilesRequest(
            maxResults: maxResults,
            nextToken: token,
            typeEquals: typeEquals
        )
    }
}

extension Bedrock.ListMarketplaceModelEndpointsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListMarketplaceModelEndpointsRequest {
        Bedrock.ListMarketplaceModelEndpointsRequest(
            maxResults: maxResults,
            modelSourceEquals: modelSourceEquals,
            nextToken: token
        )
    }
}

extension Bedrock.ListModelCopyJobsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListModelCopyJobsRequest {
        Bedrock.ListModelCopyJobsRequest(
            creationTimeAfter: creationTimeAfter,
            creationTimeBefore: creationTimeBefore,
            maxResults: maxResults,
            nextToken: token,
            sortBy: sortBy,
            sortOrder: sortOrder,
            sourceAccountEquals: sourceAccountEquals,
            sourceModelArnEquals: sourceModelArnEquals,
            statusEquals: statusEquals,
            targetModelNameContains: targetModelNameContains
        )
    }
}

extension Bedrock.ListModelCustomizationJobsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListModelCustomizationJobsRequest {
        Bedrock.ListModelCustomizationJobsRequest(
            creationTimeAfter: creationTimeAfter,
            creationTimeBefore: creationTimeBefore,
            maxResults: maxResults,
            nameContains: nameContains,
            nextToken: token,
            sortBy: sortBy,
            sortOrder: sortOrder,
            statusEquals: statusEquals
        )
    }
}

extension Bedrock.ListModelImportJobsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListModelImportJobsRequest {
        Bedrock.ListModelImportJobsRequest(
            creationTimeAfter: creationTimeAfter,
            creationTimeBefore: creationTimeBefore,
            maxResults: maxResults,
            nameContains: nameContains,
            nextToken: token,
            sortBy: sortBy,
            sortOrder: sortOrder,
            statusEquals: statusEquals
        )
    }
}

extension Bedrock.ListModelInvocationJobsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListModelInvocationJobsRequest {
        Bedrock.ListModelInvocationJobsRequest(
            maxResults: maxResults,
            nameContains: nameContains,
            nextToken: token,
            sortBy: sortBy,
            sortOrder: sortOrder,
            statusEquals: statusEquals,
            submitTimeAfter: submitTimeAfter,
            submitTimeBefore: submitTimeBefore
        )
    }
}

extension Bedrock.ListPromptRoutersRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListPromptRoutersRequest {
        Bedrock.ListPromptRoutersRequest(
            maxResults: maxResults,
            nextToken: token,
            type: type
        )
    }
}

extension Bedrock.ListProvisionedModelThroughputsRequest: AWSPaginateToken {
    /// Returns a copy of this request with `nextToken` set to `token`;
    /// every other field is carried over unchanged.
    @inlinable
    public func usingPaginationToken(_ token: String) -> Bedrock.ListProvisionedModelThroughputsRequest {
        Bedrock.ListProvisionedModelThroughputsRequest(
            creationTimeAfter: creationTimeAfter,
            creationTimeBefore: creationTimeBefore,
            maxResults: maxResults,
            modelArnEquals: modelArnEquals,
            nameContains: nameContains,
            nextToken: token,
            sortBy: sortBy,
            sortOrder: sortOrder,
            statusEquals: statusEquals
        )
    }
}
